/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "mgmt_util.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	10
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
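
/* Convert an HCI status code into its mgmt counterpart using the table
 * above; any code outside the table is reported as a generic failure.
 */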
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;

	return ADDR_LE_DEV_RANDOM;
}
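
/* The two handlers below answer on behalf of the stack as a whole rather
 * than a specific controller, so they respond with MGMT_INDEX_NONE.
 */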
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
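
/* Build the list of configured BR/EDR controller indexes. Controllers that
 * are still in setup or config, in use by a user channel, or marked as
 * raw-only are skipped.
 */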
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
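
/* A controller counts as configured once any required external configuration
 * has completed and, if the default address is invalid, a public address has
 * been set.
 */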
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
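
/* MGMT_OP_READ_CONFIG_INFO reports which configuration options the
 * controller supports and which of them are still missing.
 */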
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
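
/* The supported settings are derived from the controller's capabilities
 * (BR/EDR, SSP, SC, LE), while the current settings mirror the runtime
 * device flags.
 */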
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
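
/* Helpers for building the UUID portions of the extended inquiry response.
 * Each list starts out tagged as "complete" and is downgraded to "some"
 * when the remaining buffer space runs out.
 */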
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}

static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static void update_scan_rsp_data(struct hci_request *req)
{
	update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
}
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

static bool get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}
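
/* Assemble the advertising data for an instance: an optional Flags field,
 * the instance's own advertising data, and optionally a Tx Power field.
 */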
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static void update_adv_data(struct hci_request *req)
{
	update_inst_adv_data(req, get_current_adv_instance(req->hdev));
}

int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
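
/* Program the advertising parameters and enable advertising. The advertising
 * type is chosen from the connectable setting and the presence of scan
 * response data.
 */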
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
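
/* Queue the HCI commands needed to stop whatever discovery is currently
 * running; returns true if any command was added to the request.
 */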
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
				u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
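
/* Arm the expiry timer for an advertising instance and, unless the same
 * instance is already being advertised, switch the advertising data over
 * to it.
 */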
static int schedule_adv_instance(struct hci_request *req, u8 instance,
				 bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	update_adv_data(req);
	update_scan_rsp_data(req);
	enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
			       u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		schedule_adv_instance(req, next_instance->instance, false);
}
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	clear_adv_instance(hdev, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
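
/* MGMT_OP_SET_POWERED: powering on is handed off to the power_on work,
 * while powering off first cleans up scanning, advertising and connections.
 */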
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), skip);
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}
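
/* Completion handler for the Set Discoverable HCI transaction: update the
 * flags, arm the discoverable timeout and refresh class of device and page
 * scan state.
 */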
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
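
/* Fast connectable shortens the page scan interval (160 msec, interlaced
 * scan) compared to the standard 1.28 sec default.
 */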
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
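
/* MGMT_OP_SET_HS: enable or disable High Speed (AMP) support. The request
 * is rejected while SSP is disabled or while an SSP change is pending, and
 * HS cannot be disabled on a powered controller.
 */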
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
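
/* Request callback for the Write LE Host Supported command issued by
 * set_le(). It maps the HCI status onto any pending MGMT_OP_SET_LE
 * commands and refreshes advertising and scan response data once LE has
 * actually been enabled.
 */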
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
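
/* MGMT_OP_SET_LE: enable or disable Low Energy support. The payload is a
 * single mgmt_mode byte (0x00/0x01); on a powered dual-mode controller the
 * change is programmed via HCI_OP_WRITE_LE_HOST_SUPPORTED.
 */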
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!val)
		clear_adv_instance(hdev, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
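
/* Generic completion handler for the UUID and class-of-device commands.
 * It looks up the pending command for the given management opcode and
 * completes it with the current device class.
 */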
2667 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2669 struct mgmt_pending_cmd
*cmd
;
2673 cmd
= pending_find(mgmt_op
, hdev
);
2677 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
2678 mgmt_status(status
), hdev
->dev_class
, 3);
2680 mgmt_pending_remove(cmd
);
2683 hci_dev_unlock(hdev
);
2686 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2688 BT_DBG("status 0x%02x", status
);
2690 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2693 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2695 struct mgmt_cp_add_uuid
*cp
= data
;
2696 struct mgmt_pending_cmd
*cmd
;
2697 struct hci_request req
;
2698 struct bt_uuid
*uuid
;
2701 BT_DBG("request for %s", hdev
->name
);
2705 if (pending_eir_or_class(hdev
)) {
2706 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2711 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2717 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2718 uuid
->svc_hint
= cp
->svc_hint
;
2719 uuid
->size
= get_uuid_size(cp
->uuid
);
2721 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2723 hci_req_init(&req
, hdev
);
2728 err
= hci_req_run(&req
, add_uuid_complete
);
2730 if (err
!= -ENODATA
)
2733 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2734 hdev
->dev_class
, 3);
2738 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2747 hci_dev_unlock(hdev
);
2751 static bool enable_service_cache(struct hci_dev
*hdev
)
2753 if (!hdev_is_powered(hdev
))
2756 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
2757 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2765 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2767 BT_DBG("status 0x%02x", status
);
2769 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2772 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2775 struct mgmt_cp_remove_uuid
*cp
= data
;
2776 struct mgmt_pending_cmd
*cmd
;
2777 struct bt_uuid
*match
, *tmp
;
2778 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2779 struct hci_request req
;
2782 BT_DBG("request for %s", hdev
->name
);
2786 if (pending_eir_or_class(hdev
)) {
2787 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2792 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2793 hci_uuids_clear(hdev
);
2795 if (enable_service_cache(hdev
)) {
2796 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2797 MGMT_OP_REMOVE_UUID
,
2798 0, hdev
->dev_class
, 3);
2807 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2808 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2811 list_del(&match
->list
);
2817 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2818 MGMT_STATUS_INVALID_PARAMS
);
2823 hci_req_init(&req
, hdev
);
2828 err
= hci_req_run(&req
, remove_uuid_complete
);
2830 if (err
!= -ENODATA
)
2833 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2834 hdev
->dev_class
, 3);
2838 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2847 hci_dev_unlock(hdev
);
2851 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2853 BT_DBG("status 0x%02x", status
);
2855 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2858 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2861 struct mgmt_cp_set_dev_class
*cp
= data
;
2862 struct mgmt_pending_cmd
*cmd
;
2863 struct hci_request req
;
2866 BT_DBG("request for %s", hdev
->name
);
2868 if (!lmp_bredr_capable(hdev
))
2869 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2870 MGMT_STATUS_NOT_SUPPORTED
);
2874 if (pending_eir_or_class(hdev
)) {
2875 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2880 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2881 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2882 MGMT_STATUS_INVALID_PARAMS
);
2886 hdev
->major_class
= cp
->major
;
2887 hdev
->minor_class
= cp
->minor
;
2889 if (!hdev_is_powered(hdev
)) {
2890 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2891 hdev
->dev_class
, 3);
2895 hci_req_init(&req
, hdev
);
2897 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2898 hci_dev_unlock(hdev
);
2899 cancel_delayed_work_sync(&hdev
->service_cache
);
2906 err
= hci_req_run(&req
, set_class_complete
);
2908 if (err
!= -ENODATA
)
2911 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2912 hdev
->dev_class
, 3);
2916 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2925 hci_dev_unlock(hdev
);
2929 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2932 struct mgmt_cp_load_link_keys
*cp
= data
;
2933 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2934 sizeof(struct mgmt_link_key_info
));
2935 u16 key_count
, expected_len
;
2939 BT_DBG("request for %s", hdev
->name
);
2941 if (!lmp_bredr_capable(hdev
))
2942 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2943 MGMT_STATUS_NOT_SUPPORTED
);
2945 key_count
= __le16_to_cpu(cp
->key_count
);
2946 if (key_count
> max_key_count
) {
2947 BT_ERR("load_link_keys: too big key_count value %u",
2949 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2950 MGMT_STATUS_INVALID_PARAMS
);
2953 expected_len
= sizeof(*cp
) + key_count
*
2954 sizeof(struct mgmt_link_key_info
);
2955 if (expected_len
!= len
) {
2956 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2958 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2959 MGMT_STATUS_INVALID_PARAMS
);
2962 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2963 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2964 MGMT_STATUS_INVALID_PARAMS
);
2966 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2969 for (i
= 0; i
< key_count
; i
++) {
2970 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2972 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2973 return mgmt_cmd_status(sk
, hdev
->id
,
2974 MGMT_OP_LOAD_LINK_KEYS
,
2975 MGMT_STATUS_INVALID_PARAMS
);
2980 hci_link_keys_clear(hdev
);
2983 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
2985 changed
= hci_dev_test_and_clear_flag(hdev
,
2986 HCI_KEEP_DEBUG_KEYS
);
2989 new_settings(hdev
, NULL
);
2991 for (i
= 0; i
< key_count
; i
++) {
2992 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2994 /* Always ignore debug keys and require a new pairing if
2995 * the user wants to use them.
2997 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
3000 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
3001 key
->type
, key
->pin_len
, NULL
);
3004 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
3006 hci_dev_unlock(hdev
);
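
/* Send the Device Unpaired event to all management sockets except the one
 * that initiated the unpairing.
 */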
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
3023 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3026 struct mgmt_cp_unpair_device
*cp
= data
;
3027 struct mgmt_rp_unpair_device rp
;
3028 struct hci_conn_params
*params
;
3029 struct mgmt_pending_cmd
*cmd
;
3030 struct hci_conn
*conn
;
3034 memset(&rp
, 0, sizeof(rp
));
3035 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3036 rp
.addr
.type
= cp
->addr
.type
;
3038 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3039 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3040 MGMT_STATUS_INVALID_PARAMS
,
3043 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
3044 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3045 MGMT_STATUS_INVALID_PARAMS
,
3050 if (!hdev_is_powered(hdev
)) {
3051 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3052 MGMT_STATUS_NOT_POWERED
, &rp
,
3057 if (cp
->addr
.type
== BDADDR_BREDR
) {
3058 /* If disconnection is requested, then look up the
3059 * connection. If the remote device is connected, it
3060 * will be later used to terminate the link.
3062 * Setting it to NULL explicitly will cause no
3063 * termination of the link.
3066 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3071 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
3073 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3074 MGMT_OP_UNPAIR_DEVICE
,
3075 MGMT_STATUS_NOT_PAIRED
, &rp
,
3083 /* LE address type */
3084 addr_type
= le_addr_type(cp
->addr
.type
);
3086 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3088 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3090 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3091 MGMT_STATUS_NOT_PAIRED
, &rp
,
3096 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3098 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3102 /* Abort any ongoing SMP pairing */
3103 smp_cancel_pairing(conn
);
3105 /* Defer clearing up the connection parameters until closing to
3106 * give a chance of keeping them if a repairing happens.
3108 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3110 /* Disable auto-connection parameters if present */
3111 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3113 if (params
->explicit_connect
)
3114 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
3116 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3119 /* If disconnection is not requested, then clear the connection
3120 * variable so that the link is not terminated.
3122 if (!cp
->disconnect
)
3126 /* If the connection variable is set, then termination of the
3127 * link is requested.
3130 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
3132 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
3136 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
3143 cmd
->cmd_complete
= addr_cmd_complete
;
3145 err
= hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3147 mgmt_pending_remove(cmd
);
3150 hci_dev_unlock(hdev
);
3154 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3157 struct mgmt_cp_disconnect
*cp
= data
;
3158 struct mgmt_rp_disconnect rp
;
3159 struct mgmt_pending_cmd
*cmd
;
3160 struct hci_conn
*conn
;
3165 memset(&rp
, 0, sizeof(rp
));
3166 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3167 rp
.addr
.type
= cp
->addr
.type
;
3169 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3170 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3171 MGMT_STATUS_INVALID_PARAMS
,
3176 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
3177 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3178 MGMT_STATUS_NOT_POWERED
, &rp
,
3183 if (pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
3184 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3185 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3189 if (cp
->addr
.type
== BDADDR_BREDR
)
3190 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3193 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
3194 le_addr_type(cp
->addr
.type
));
3196 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
3197 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3198 MGMT_STATUS_NOT_CONNECTED
, &rp
,
3203 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
3209 cmd
->cmd_complete
= generic_cmd_complete
;
3211 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3213 mgmt_pending_remove(cmd
);
3216 hci_dev_unlock(hdev
);
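
/* Translate an HCI link type and address type pair into the address type
 * values used on the management interface.
 */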
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
3239 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3242 struct mgmt_rp_get_connections
*rp
;
3252 if (!hdev_is_powered(hdev
)) {
3253 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
3254 MGMT_STATUS_NOT_POWERED
);
3259 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3260 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3264 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
3265 rp
= kmalloc(rp_len
, GFP_KERNEL
);
3272 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3273 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3275 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
3276 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
3277 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
3282 rp
->conn_count
= cpu_to_le16(i
);
3284 /* Recalculate length in case of filtered SCO connections, etc */
3285 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
3287 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
3293 hci_dev_unlock(hdev
);
3297 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3298 struct mgmt_cp_pin_code_neg_reply
*cp
)
3300 struct mgmt_pending_cmd
*cmd
;
3303 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
3308 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
3309 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
3311 mgmt_pending_remove(cmd
);
3316 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3319 struct hci_conn
*conn
;
3320 struct mgmt_cp_pin_code_reply
*cp
= data
;
3321 struct hci_cp_pin_code_reply reply
;
3322 struct mgmt_pending_cmd
*cmd
;
3329 if (!hdev_is_powered(hdev
)) {
3330 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3331 MGMT_STATUS_NOT_POWERED
);
3335 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
3337 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3338 MGMT_STATUS_NOT_CONNECTED
);
3342 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
3343 struct mgmt_cp_pin_code_neg_reply ncp
;
3345 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
3347 BT_ERR("PIN code is not 16 bytes long");
3349 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
3351 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3352 MGMT_STATUS_INVALID_PARAMS
);
3357 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
3363 cmd
->cmd_complete
= addr_cmd_complete
;
3365 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
3366 reply
.pin_len
= cp
->pin_len
;
3367 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
3369 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
3371 mgmt_pending_remove(cmd
);
3374 hci_dev_unlock(hdev
);
3378 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3381 struct mgmt_cp_set_io_capability
*cp
= data
;
3385 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
3386 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
3387 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
3391 hdev
->io_capability
= cp
->io_capability
;
3393 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
3394 hdev
->io_capability
);
3396 hci_dev_unlock(hdev
);
3398 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
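
/* Find the pending Pair Device command, if any, that owns the given
 * connection.
 */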
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}
3420 static int pairing_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
3422 struct mgmt_rp_pair_device rp
;
3423 struct hci_conn
*conn
= cmd
->user_data
;
3426 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
3427 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
3429 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
,
3430 status
, &rp
, sizeof(rp
));
3432 /* So we don't get further callbacks for this connection */
3433 conn
->connect_cfm_cb
= NULL
;
3434 conn
->security_cfm_cb
= NULL
;
3435 conn
->disconn_cfm_cb
= NULL
;
3437 hci_conn_drop(conn
);
3439 /* The device is paired so there is no need to remove
3440 * its connection parameters anymore.
3442 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3449 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
3451 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
3452 struct mgmt_pending_cmd
*cmd
;
3454 cmd
= find_pairing(conn
);
3456 cmd
->cmd_complete(cmd
, status
);
3457 mgmt_pending_remove(cmd
);
3461 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3463 struct mgmt_pending_cmd
*cmd
;
3465 BT_DBG("status %u", status
);
3467 cmd
= find_pairing(conn
);
3469 BT_DBG("Unable to find a pending command");
3473 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3474 mgmt_pending_remove(cmd
);
3477 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3479 struct mgmt_pending_cmd
*cmd
;
3481 BT_DBG("status %u", status
);
3486 cmd
= find_pairing(conn
);
3488 BT_DBG("Unable to find a pending command");
3492 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3493 mgmt_pending_remove(cmd
);
3496 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3499 struct mgmt_cp_pair_device
*cp
= data
;
3500 struct mgmt_rp_pair_device rp
;
3501 struct mgmt_pending_cmd
*cmd
;
3502 u8 sec_level
, auth_type
;
3503 struct hci_conn
*conn
;
3508 memset(&rp
, 0, sizeof(rp
));
3509 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3510 rp
.addr
.type
= cp
->addr
.type
;
3512 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3513 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3514 MGMT_STATUS_INVALID_PARAMS
,
3517 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3518 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3519 MGMT_STATUS_INVALID_PARAMS
,
3524 if (!hdev_is_powered(hdev
)) {
3525 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3526 MGMT_STATUS_NOT_POWERED
, &rp
,
3531 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
3532 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3533 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
3538 sec_level
= BT_SECURITY_MEDIUM
;
3539 auth_type
= HCI_AT_DEDICATED_BONDING
;
3541 if (cp
->addr
.type
== BDADDR_BREDR
) {
3542 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3545 u8 addr_type
= le_addr_type(cp
->addr
.type
);
3546 struct hci_conn_params
*p
;
3548 /* When pairing a new device, it is expected to remember
3549 * this device for future connections. Adding the connection
3550 * parameter information ahead of time allows tracking
3551 * of the slave preferred values and will speed up any
3552 * further connection establishment.
3554 * If connection parameters already exist, then they
3555 * will be kept and this function does nothing.
3557 p
= hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3559 if (p
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
)
3560 p
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3562 conn
= hci_connect_le_scan(hdev
, &cp
->addr
.bdaddr
,
3563 addr_type
, sec_level
,
3564 HCI_LE_CONN_TIMEOUT
,
3571 if (PTR_ERR(conn
) == -EBUSY
)
3572 status
= MGMT_STATUS_BUSY
;
3573 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
3574 status
= MGMT_STATUS_NOT_SUPPORTED
;
3575 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
3576 status
= MGMT_STATUS_REJECTED
;
3578 status
= MGMT_STATUS_CONNECT_FAILED
;
3580 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3581 status
, &rp
, sizeof(rp
));
3585 if (conn
->connect_cfm_cb
) {
3586 hci_conn_drop(conn
);
3587 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3588 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3592 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3595 hci_conn_drop(conn
);
3599 cmd
->cmd_complete
= pairing_complete
;
3601 /* For LE, just connecting isn't a proof that the pairing finished */
3602 if (cp
->addr
.type
== BDADDR_BREDR
) {
3603 conn
->connect_cfm_cb
= pairing_complete_cb
;
3604 conn
->security_cfm_cb
= pairing_complete_cb
;
3605 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3607 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3608 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3609 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3612 conn
->io_capability
= cp
->io_cap
;
3613 cmd
->user_data
= hci_conn_get(conn
);
3615 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3616 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
3617 cmd
->cmd_complete(cmd
, 0);
3618 mgmt_pending_remove(cmd
);
3624 hci_dev_unlock(hdev
);
3628 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3631 struct mgmt_addr_info
*addr
= data
;
3632 struct mgmt_pending_cmd
*cmd
;
3633 struct hci_conn
*conn
;
3640 if (!hdev_is_powered(hdev
)) {
3641 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3642 MGMT_STATUS_NOT_POWERED
);
3646 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3648 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3649 MGMT_STATUS_INVALID_PARAMS
);
3653 conn
= cmd
->user_data
;
3655 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3656 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3657 MGMT_STATUS_INVALID_PARAMS
);
3661 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
3662 mgmt_pending_remove(cmd
);
3664 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3665 addr
, sizeof(*addr
));
3667 hci_dev_unlock(hdev
);
3671 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3672 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3673 u16 hci_op
, __le32 passkey
)
3675 struct mgmt_pending_cmd
*cmd
;
3676 struct hci_conn
*conn
;
3681 if (!hdev_is_powered(hdev
)) {
3682 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3683 MGMT_STATUS_NOT_POWERED
, addr
,
3688 if (addr
->type
== BDADDR_BREDR
)
3689 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3691 conn
= hci_conn_hash_lookup_le(hdev
, &addr
->bdaddr
,
3692 le_addr_type(addr
->type
));
3695 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3696 MGMT_STATUS_NOT_CONNECTED
, addr
,
3701 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3702 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3704 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3705 MGMT_STATUS_SUCCESS
, addr
,
3708 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3709 MGMT_STATUS_FAILED
, addr
,
3715 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3721 cmd
->cmd_complete
= addr_cmd_complete
;
3723 /* Continue with pairing via HCI */
3724 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3725 struct hci_cp_user_passkey_reply cp
;
3727 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3728 cp
.passkey
= passkey
;
3729 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3731 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3735 mgmt_pending_remove(cmd
);
3738 hci_dev_unlock(hdev
);
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
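
/* Queue a Write Local Name command carrying the currently configured
 * device name.
 */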
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3816 static void set_name_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3818 struct mgmt_cp_set_local_name
*cp
;
3819 struct mgmt_pending_cmd
*cmd
;
3821 BT_DBG("status 0x%02x", status
);
3825 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3832 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3833 mgmt_status(status
));
3835 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3838 mgmt_pending_remove(cmd
);
3841 hci_dev_unlock(hdev
);
3844 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3847 struct mgmt_cp_set_local_name
*cp
= data
;
3848 struct mgmt_pending_cmd
*cmd
;
3849 struct hci_request req
;
3856 /* If the old values are the same as the new ones just return a
3857 * direct command complete event.
3859 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3860 !memcmp(hdev
->short_name
, cp
->short_name
,
3861 sizeof(hdev
->short_name
))) {
3862 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3867 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3869 if (!hdev_is_powered(hdev
)) {
3870 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3872 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3877 err
= mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
,
3883 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3889 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3891 hci_req_init(&req
, hdev
);
3893 if (lmp_bredr_capable(hdev
)) {
3898 /* The name is stored in the scan response data and so
3899 * no need to udpate the advertising data here.
3901 if (lmp_le_capable(hdev
))
3902 update_scan_rsp_data(&req
);
3904 err
= hci_req_run(&req
, set_name_complete
);
3906 mgmt_pending_remove(cmd
);
3909 hci_dev_unlock(hdev
);
3913 static void read_local_oob_data_complete(struct hci_dev
*hdev
, u8 status
,
3914 u16 opcode
, struct sk_buff
*skb
)
3916 struct mgmt_rp_read_local_oob_data mgmt_rp
;
3917 size_t rp_size
= sizeof(mgmt_rp
);
3918 struct mgmt_pending_cmd
*cmd
;
3920 BT_DBG("%s status %u", hdev
->name
, status
);
3922 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
3926 if (status
|| !skb
) {
3927 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3928 status
? mgmt_status(status
) : MGMT_STATUS_FAILED
);
3932 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
3934 if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
3935 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
3937 if (skb
->len
< sizeof(*rp
)) {
3938 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3939 MGMT_OP_READ_LOCAL_OOB_DATA
,
3940 MGMT_STATUS_FAILED
);
3944 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
3945 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
3947 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
3949 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
3951 if (skb
->len
< sizeof(*rp
)) {
3952 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3953 MGMT_OP_READ_LOCAL_OOB_DATA
,
3954 MGMT_STATUS_FAILED
);
3958 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
3959 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
3961 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
3962 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
3965 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3966 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
3969 mgmt_pending_remove(cmd
);
3972 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3973 void *data
, u16 data_len
)
3975 struct mgmt_pending_cmd
*cmd
;
3976 struct hci_request req
;
3979 BT_DBG("%s", hdev
->name
);
3983 if (!hdev_is_powered(hdev
)) {
3984 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3985 MGMT_STATUS_NOT_POWERED
);
3989 if (!lmp_ssp_capable(hdev
)) {
3990 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3991 MGMT_STATUS_NOT_SUPPORTED
);
3995 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3996 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4001 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
4007 hci_req_init(&req
, hdev
);
4009 if (bredr_sc_enabled(hdev
))
4010 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
4012 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
4014 err
= hci_req_run_skb(&req
, read_local_oob_data_complete
);
4016 mgmt_pending_remove(cmd
);
4019 hci_dev_unlock(hdev
);
4023 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4024 void *data
, u16 len
)
4026 struct mgmt_addr_info
*addr
= data
;
4029 BT_DBG("%s ", hdev
->name
);
4031 if (!bdaddr_type_is_valid(addr
->type
))
4032 return mgmt_cmd_complete(sk
, hdev
->id
,
4033 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4034 MGMT_STATUS_INVALID_PARAMS
,
4035 addr
, sizeof(*addr
));
4039 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
4040 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
4043 if (cp
->addr
.type
!= BDADDR_BREDR
) {
4044 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4045 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4046 MGMT_STATUS_INVALID_PARAMS
,
4047 &cp
->addr
, sizeof(cp
->addr
));
4051 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4052 cp
->addr
.type
, cp
->hash
,
4053 cp
->rand
, NULL
, NULL
);
4055 status
= MGMT_STATUS_FAILED
;
4057 status
= MGMT_STATUS_SUCCESS
;
4059 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4060 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
4061 &cp
->addr
, sizeof(cp
->addr
));
4062 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
4063 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
4064 u8
*rand192
, *hash192
, *rand256
, *hash256
;
4067 if (bdaddr_type_is_le(cp
->addr
.type
)) {
4068 /* Enforce zero-valued 192-bit parameters as
4069 * long as legacy SMP OOB isn't implemented.
4071 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4072 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4073 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4074 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4075 MGMT_STATUS_INVALID_PARAMS
,
4076 addr
, sizeof(*addr
));
4083 /* In case one of the P-192 values is set to zero,
4084 * then just disable OOB data for P-192.
4086 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4087 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4091 rand192
= cp
->rand192
;
4092 hash192
= cp
->hash192
;
4096 /* In case one of the P-256 values is set to zero, then just
4097 * disable OOB data for P-256.
4099 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
4100 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
4104 rand256
= cp
->rand256
;
4105 hash256
= cp
->hash256
;
4108 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4109 cp
->addr
.type
, hash192
, rand192
,
4112 status
= MGMT_STATUS_FAILED
;
4114 status
= MGMT_STATUS_SUCCESS
;
4116 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4117 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4118 status
, &cp
->addr
, sizeof(cp
->addr
));
4120 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
4121 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
4122 MGMT_STATUS_INVALID_PARAMS
);
4126 hci_dev_unlock(hdev
);
4130 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4131 void *data
, u16 len
)
4133 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
4137 BT_DBG("%s", hdev
->name
);
4139 if (cp
->addr
.type
!= BDADDR_BREDR
)
4140 return mgmt_cmd_complete(sk
, hdev
->id
,
4141 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4142 MGMT_STATUS_INVALID_PARAMS
,
4143 &cp
->addr
, sizeof(cp
->addr
));
4147 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
4148 hci_remote_oob_data_clear(hdev
);
4149 status
= MGMT_STATUS_SUCCESS
;
4153 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
4155 status
= MGMT_STATUS_INVALID_PARAMS
;
4157 status
= MGMT_STATUS_SUCCESS
;
4160 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4161 status
, &cp
->addr
, sizeof(cp
->addr
));
4163 hci_dev_unlock(hdev
);
4167 static bool trigger_bredr_inquiry(struct hci_request
*req
, u8
*status
)
4169 struct hci_dev
*hdev
= req
->hdev
;
4170 struct hci_cp_inquiry cp
;
4171 /* General inquiry access code (GIAC) */
4172 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
4174 *status
= mgmt_bredr_support(hdev
);
4178 if (hci_dev_test_flag(hdev
, HCI_INQUIRY
)) {
4179 *status
= MGMT_STATUS_BUSY
;
4183 hci_inquiry_cache_flush(hdev
);
4185 memset(&cp
, 0, sizeof(cp
));
4186 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
4187 cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
4189 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
4194 static bool trigger_le_scan(struct hci_request
*req
, u16 interval
, u8
*status
)
4196 struct hci_dev
*hdev
= req
->hdev
;
4197 struct hci_cp_le_set_scan_param param_cp
;
4198 struct hci_cp_le_set_scan_enable enable_cp
;
4202 *status
= mgmt_le_support(hdev
);
4206 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
)) {
4207 /* Don't let discovery abort an outgoing connection attempt
4208 * that's using directed advertising.
4210 if (hci_lookup_le_connect(hdev
)) {
4211 *status
= MGMT_STATUS_REJECTED
;
4215 cancel_adv_timeout(hdev
);
4216 disable_advertising(req
);
4219 /* If controller is scanning, it means the background scanning is
4220 * running. Thus, we should temporarily stop it in order to set the
4221 * discovery scanning parameters.
4223 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
4224 hci_req_add_le_scan_disable(req
);
4226 /* All active scans will be done with either a resolvable private
4227 * address (when privacy feature has been enabled) or non-resolvable
4230 err
= hci_update_random_address(req
, true, &own_addr_type
);
4232 *status
= MGMT_STATUS_FAILED
;
4236 memset(¶m_cp
, 0, sizeof(param_cp
));
4237 param_cp
.type
= LE_SCAN_ACTIVE
;
4238 param_cp
.interval
= cpu_to_le16(interval
);
4239 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
4240 param_cp
.own_address_type
= own_addr_type
;
4242 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
4245 memset(&enable_cp
, 0, sizeof(enable_cp
));
4246 enable_cp
.enable
= LE_SCAN_ENABLE
;
4247 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
4249 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
4255 static bool trigger_discovery(struct hci_request
*req
, u8
*status
)
4257 struct hci_dev
*hdev
= req
->hdev
;
4259 switch (hdev
->discovery
.type
) {
4260 case DISCOV_TYPE_BREDR
:
4261 if (!trigger_bredr_inquiry(req
, status
))
4265 case DISCOV_TYPE_INTERLEAVED
:
4266 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY
,
4268 /* During simultaneous discovery, we double LE scan
4269 * interval. We must leave some time for the controller
4270 * to do BR/EDR inquiry.
4272 if (!trigger_le_scan(req
, DISCOV_LE_SCAN_INT
* 2,
4276 if (!trigger_bredr_inquiry(req
, status
))
4282 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
4283 *status
= MGMT_STATUS_NOT_SUPPORTED
;
4288 case DISCOV_TYPE_LE
:
4289 if (!trigger_le_scan(req
, DISCOV_LE_SCAN_INT
, status
))
4294 *status
= MGMT_STATUS_INVALID_PARAMS
;
4301 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
,
4304 struct mgmt_pending_cmd
*cmd
;
4305 unsigned long timeout
;
4307 BT_DBG("status %d", status
);
4311 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
4313 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
4316 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4317 mgmt_pending_remove(cmd
);
4321 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4325 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
4327 /* If the scan involves LE scan, pick proper timeout to schedule
4328 * hdev->le_scan_disable that will stop it.
4330 switch (hdev
->discovery
.type
) {
4331 case DISCOV_TYPE_LE
:
4332 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
4334 case DISCOV_TYPE_INTERLEAVED
:
4335 /* When running simultaneous discovery, the LE scanning time
4336 * should occupy the whole discovery time sine BR/EDR inquiry
4337 * and LE scanning are scheduled by the controller.
4339 * For interleaving discovery in comparison, BR/EDR inquiry
4340 * and LE scanning are done sequentially with separate
4343 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY
, &hdev
->quirks
))
4344 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
4346 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
4348 case DISCOV_TYPE_BREDR
:
4352 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
4358 /* When service discovery is used and the controller has
4359 * a strict duplicate filter, it is important to remember
4360 * the start and duration of the scan. This is required
4361 * for restarting scanning during the discovery phase.
4363 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
,
4365 hdev
->discovery
.result_filtering
) {
4366 hdev
->discovery
.scan_start
= jiffies
;
4367 hdev
->discovery
.scan_duration
= timeout
;
4370 queue_delayed_work(hdev
->workqueue
,
4371 &hdev
->le_scan_disable
, timeout
);
4375 hci_dev_unlock(hdev
);
4378 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4379 void *data
, u16 len
)
4381 struct mgmt_cp_start_discovery
*cp
= data
;
4382 struct mgmt_pending_cmd
*cmd
;
4383 struct hci_request req
;
4387 BT_DBG("%s", hdev
->name
);
4391 if (!hdev_is_powered(hdev
)) {
4392 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4393 MGMT_STATUS_NOT_POWERED
,
4394 &cp
->type
, sizeof(cp
->type
));
4398 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4399 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4400 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4401 MGMT_STATUS_BUSY
, &cp
->type
,
4406 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, data
, len
);
4412 cmd
->cmd_complete
= generic_cmd_complete
;
4414 /* Clear the discovery filter first to free any previously
4415 * allocated memory for the UUID list.
4417 hci_discovery_filter_clear(hdev
);
4419 hdev
->discovery
.type
= cp
->type
;
4420 hdev
->discovery
.report_invalid_rssi
= false;
4422 hci_req_init(&req
, hdev
);
4424 if (!trigger_discovery(&req
, &status
)) {
4425 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4426 status
, &cp
->type
, sizeof(cp
->type
));
4427 mgmt_pending_remove(cmd
);
4431 err
= hci_req_run(&req
, start_discovery_complete
);
4433 mgmt_pending_remove(cmd
);
4437 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4440 hci_dev_unlock(hdev
);
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4451 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4452 void *data
, u16 len
)
4454 struct mgmt_cp_start_service_discovery
*cp
= data
;
4455 struct mgmt_pending_cmd
*cmd
;
4456 struct hci_request req
;
4457 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
4458 u16 uuid_count
, expected_len
;
4462 BT_DBG("%s", hdev
->name
);
4466 if (!hdev_is_powered(hdev
)) {
4467 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4468 MGMT_OP_START_SERVICE_DISCOVERY
,
4469 MGMT_STATUS_NOT_POWERED
,
4470 &cp
->type
, sizeof(cp
->type
));
4474 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4475 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4476 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4477 MGMT_OP_START_SERVICE_DISCOVERY
,
4478 MGMT_STATUS_BUSY
, &cp
->type
,
4483 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
4484 if (uuid_count
> max_uuid_count
) {
4485 BT_ERR("service_discovery: too big uuid_count value %u",
4487 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4488 MGMT_OP_START_SERVICE_DISCOVERY
,
4489 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4494 expected_len
= sizeof(*cp
) + uuid_count
* 16;
4495 if (expected_len
!= len
) {
4496 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4498 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4499 MGMT_OP_START_SERVICE_DISCOVERY
,
4500 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4505 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4512 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4514 /* Clear the discovery filter first to free any previously
4515 * allocated memory for the UUID list.
4517 hci_discovery_filter_clear(hdev
);
4519 hdev
->discovery
.result_filtering
= true;
4520 hdev
->discovery
.type
= cp
->type
;
4521 hdev
->discovery
.rssi
= cp
->rssi
;
4522 hdev
->discovery
.uuid_count
= uuid_count
;
4524 if (uuid_count
> 0) {
4525 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4527 if (!hdev
->discovery
.uuids
) {
4528 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4529 MGMT_OP_START_SERVICE_DISCOVERY
,
4531 &cp
->type
, sizeof(cp
->type
));
4532 mgmt_pending_remove(cmd
);
4537 hci_req_init(&req
, hdev
);
4539 if (!trigger_discovery(&req
, &status
)) {
4540 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4541 MGMT_OP_START_SERVICE_DISCOVERY
,
4542 status
, &cp
->type
, sizeof(cp
->type
));
4543 mgmt_pending_remove(cmd
);
4547 err
= hci_req_run(&req
, start_discovery_complete
);
4549 mgmt_pending_remove(cmd
);
4553 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4556 hci_dev_unlock(hdev
);
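
/* Request callback for Stop Discovery: complete the pending command and
 * move the discovery state machine back to DISCOVERY_STOPPED.
 */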
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4580 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4583 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4584 struct mgmt_pending_cmd
*cmd
;
4585 struct hci_request req
;
4588 BT_DBG("%s", hdev
->name
);
4592 if (!hci_discovery_active(hdev
)) {
4593 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4594 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4595 sizeof(mgmt_cp
->type
));
4599 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4600 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4601 MGMT_STATUS_INVALID_PARAMS
,
4602 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4606 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4612 cmd
->cmd_complete
= generic_cmd_complete
;
4614 hci_req_init(&req
, hdev
);
4616 hci_stop_discovery(&req
);
4618 err
= hci_req_run(&req
, stop_discovery_complete
);
4620 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4624 mgmt_pending_remove(cmd
);
4626 /* If no HCI commands were sent we're done */
4627 if (err
== -ENODATA
) {
4628 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
4629 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4630 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4634 hci_dev_unlock(hdev
);
4638 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4641 struct mgmt_cp_confirm_name
*cp
= data
;
4642 struct inquiry_entry
*e
;
4645 BT_DBG("%s", hdev
->name
);
4649 if (!hci_discovery_active(hdev
)) {
4650 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4651 MGMT_STATUS_FAILED
, &cp
->addr
,
4656 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4658 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4659 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4664 if (cp
->name_known
) {
4665 e
->name_state
= NAME_KNOWN
;
4668 e
->name_state
= NAME_NEEDED
;
4669 hci_inquiry_cache_update_resolve(hdev
, e
);
4672 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
4673 &cp
->addr
, sizeof(cp
->addr
));
4676 hci_dev_unlock(hdev
);
4680 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4683 struct mgmt_cp_block_device
*cp
= data
;
4687 BT_DBG("%s", hdev
->name
);
4689 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4690 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
4691 MGMT_STATUS_INVALID_PARAMS
,
4692 &cp
->addr
, sizeof(cp
->addr
));
4696 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4699 status
= MGMT_STATUS_FAILED
;
4703 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4705 status
= MGMT_STATUS_SUCCESS
;
4708 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
4709 &cp
->addr
, sizeof(cp
->addr
));
4711 hci_dev_unlock(hdev
);
4716 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4719 struct mgmt_cp_unblock_device
*cp
= data
;
4723 BT_DBG("%s", hdev
->name
);
4725 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4726 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
4727 MGMT_STATUS_INVALID_PARAMS
,
4728 &cp
->addr
, sizeof(cp
->addr
));
4732 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4735 status
= MGMT_STATUS_INVALID_PARAMS
;
4739 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4741 status
= MGMT_STATUS_SUCCESS
;
4744 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
4745 &cp
->addr
, sizeof(cp
->addr
));
4747 hci_dev_unlock(hdev
);
4752 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4755 struct mgmt_cp_set_device_id
*cp
= data
;
4756 struct hci_request req
;
4760 BT_DBG("%s", hdev
->name
);
4762 source
= __le16_to_cpu(cp
->source
);
4764 if (source
> 0x0002)
4765 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
4766 MGMT_STATUS_INVALID_PARAMS
);
4770 hdev
->devid_source
= source
;
4771 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
4772 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
4773 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
4775 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
4778 hci_req_init(&req
, hdev
);
4780 hci_req_run(&req
, NULL
);
4782 hci_dev_unlock(hdev
);
4787 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
4790 BT_DBG("status %d", status
);
4793 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
4796 struct cmd_lookup match
= { NULL
, hdev
};
4797 struct hci_request req
;
4799 struct adv_info
*adv_instance
;
4805 u8 mgmt_err
= mgmt_status(status
);
4807 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4808 cmd_status_rsp
, &mgmt_err
);
4812 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
4813 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
4815 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
4817 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4820 new_settings(hdev
, match
.sk
);
4825 /* If "Set Advertising" was just disabled and instance advertising was
4826 * set up earlier, then re-enable multi-instance advertising.
4828 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
4829 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) ||
4830 list_empty(&hdev
->adv_instances
))
4833 instance
= hdev
->cur_adv_instance
;
4835 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
4836 struct adv_info
, list
);
4840 instance
= adv_instance
->instance
;
4843 hci_req_init(&req
, hdev
);
4845 err
= schedule_adv_instance(&req
, instance
, true);
4848 err
= hci_req_run(&req
, enable_advertising_instance
);
4851 BT_ERR("Failed to re-configure advertising");
4854 hci_dev_unlock(hdev
);
4857 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4860 struct mgmt_mode
*cp
= data
;
4861 struct mgmt_pending_cmd
*cmd
;
4862 struct hci_request req
;
4866 BT_DBG("request for %s", hdev
->name
);
4868 status
= mgmt_le_support(hdev
);
4870 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4873 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4874 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4875 MGMT_STATUS_INVALID_PARAMS
);
4881 /* The following conditions are ones which mean that we should
4882 * not do any HCI communication but directly send a mgmt
4883 * response to user space (after toggling the flag if
4886 if (!hdev_is_powered(hdev
) ||
4887 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
4888 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
4889 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4890 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4891 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4895 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
4896 if (cp
->val
== 0x02)
4897 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4899 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4901 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
4902 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4905 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4910 err
= new_settings(hdev
, sk
);
4915 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4916 pending_find(MGMT_OP_SET_LE
, hdev
)) {
4917 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4922 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4928 hci_req_init(&req
, hdev
);
4930 if (cp
->val
== 0x02)
4931 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4933 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4935 cancel_adv_timeout(hdev
);
4938 /* Switch to instance "0" for the Set Advertising setting.
4939 * We cannot use update_[adv|scan_rsp]_data() here as the
4940 * HCI_ADVERTISING flag is not yet set.
4942 update_inst_adv_data(&req
, 0x00);
4943 update_inst_scan_rsp_data(&req
, 0x00);
4944 enable_advertising(&req
);
4946 disable_advertising(&req
);
4949 err
= hci_req_run(&req
, set_advertising_complete
);
4951 mgmt_pending_remove(cmd
);
4954 hci_dev_unlock(hdev
);
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
                              void *data, u16 len)
{
        struct mgmt_cp_set_static_address *cp = data;
        int err;

        BT_DBG("%s", hdev->name);

        if (!lmp_le_capable(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
                                       MGMT_STATUS_NOT_SUPPORTED);

        if (hdev_is_powered(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
                                       MGMT_STATUS_REJECTED);

        if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
                if (!bacmp(&cp->bdaddr, BDADDR_NONE))
                        return mgmt_cmd_status(sk, hdev->id,
                                               MGMT_OP_SET_STATIC_ADDRESS,
                                               MGMT_STATUS_INVALID_PARAMS);

                /* Two most significant bits shall be set */
                if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
                        return mgmt_cmd_status(sk, hdev->id,
                                               MGMT_OP_SET_STATIC_ADDRESS,
                                               MGMT_STATUS_INVALID_PARAMS);
        }

        hci_dev_lock(hdev);

        bacpy(&hdev->static_addr, &cp->bdaddr);

        err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
        if (err < 0)
                goto unlock;

        err = new_settings(hdev, sk);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
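/* Illustrative sketch (not part of mgmt.c): the check above enforces the
 * Bluetooth rule that an LE static random address must have the two most
 * significant bits of its most significant byte set to 1. A minimal
 * stand-alone helper for that rule could look like the following; the
 * helper name is hypothetical and the block is compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* bdaddr bytes are stored little endian, so b[5] is the most
 * significant byte of the address.
 */
static bool is_le_static_random_addr(const uint8_t b[6])
{
        return (b[5] & 0xc0) == 0xc0;
}
#endif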
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
        struct mgmt_cp_set_scan_params *cp = data;
        __u16 interval, window;
        int err;

        BT_DBG("%s", hdev->name);

        if (!lmp_le_capable(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                                       MGMT_STATUS_NOT_SUPPORTED);

        interval = __le16_to_cpu(cp->interval);

        if (interval < 0x0004 || interval > 0x4000)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                                       MGMT_STATUS_INVALID_PARAMS);

        window = __le16_to_cpu(cp->window);

        if (window < 0x0004 || window > 0x4000)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                                       MGMT_STATUS_INVALID_PARAMS);

        if (window > interval)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                                       MGMT_STATUS_INVALID_PARAMS);

        hci_dev_lock(hdev);

        hdev->le_scan_interval = interval;
        hdev->le_scan_window = window;

        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
                                NULL, 0);

        /* If background scan is running, restart it so new parameters are
         * loaded.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
            hdev->discovery.state == DISCOVERY_STOPPED) {
                struct hci_request req;

                hci_req_init(&req, hdev);

                hci_req_add_le_scan_disable(&req);
                hci_req_add_le_passive_scan(&req);

                hci_req_run(&req, NULL);
        }

        hci_dev_unlock(hdev);

        return err;
}
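/* Illustrative sketch (not part of mgmt.c): the bounds checked above are the
 * HCI LE scan units of 0.625 ms, i.e. 0x0004 (2.5 ms) up to 0x4000 (10.24 s),
 * with the additional rule that the scan window may not exceed the scan
 * interval. A hypothetical user-space helper for building such parameters
 * from milliseconds might look like this; it is compiled out and only meant
 * as an example of the arithmetic.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool scan_params_from_ms(unsigned int interval_ms,
                                unsigned int window_ms,
                                uint16_t *interval, uint16_t *window)
{
        /* Convert from milliseconds to 0.625 ms units. */
        unsigned int i = (interval_ms * 1000) / 625;
        unsigned int w = (window_ms * 1000) / 625;

        if (i < 0x0004 || i > 0x4000 || w < 0x0004 || w > 0x4000 || w > i)
                return false;

        *interval = (uint16_t)i;
        *window = (uint16_t)w;
        return true;
}
#endif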
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
                                      u16 opcode)
{
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
        if (!cmd)
                goto unlock;

        if (status) {
                mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                mgmt_status(status));
        } else {
                struct mgmt_mode *cp = cmd->param;

                if (cp->val)
                        hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
                else
                        hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

                send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
                new_settings(hdev, cmd->sk);
        }

        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
5093 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
5094 void *data
, u16 len
)
5096 struct mgmt_mode
*cp
= data
;
5097 struct mgmt_pending_cmd
*cmd
;
5098 struct hci_request req
;
5101 BT_DBG("%s", hdev
->name
);
5103 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
5104 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
5105 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5106 MGMT_STATUS_NOT_SUPPORTED
);
5108 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5109 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5110 MGMT_STATUS_INVALID_PARAMS
);
5114 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
5115 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5120 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
5121 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
5126 if (!hdev_is_powered(hdev
)) {
5127 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
5128 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
5130 new_settings(hdev
, sk
);
5134 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
5141 hci_req_init(&req
, hdev
);
5143 write_fast_connectable(&req
, cp
->val
);
5145 err
= hci_req_run(&req
, fast_connectable_complete
);
5147 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5148 MGMT_STATUS_FAILED
);
5149 mgmt_pending_remove(cmd
);
5153 hci_dev_unlock(hdev
);
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
        if (!cmd)
                goto unlock;

        if (status) {
                u8 mgmt_err = mgmt_status(status);

                /* We need to restore the flag if related HCI commands
                 * failed.
                 */
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

                mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
        } else {
                send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
                new_settings(hdev, cmd->sk);
        }

        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
5190 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
5192 struct mgmt_mode
*cp
= data
;
5193 struct mgmt_pending_cmd
*cmd
;
5194 struct hci_request req
;
5197 BT_DBG("request for %s", hdev
->name
);
5199 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
5200 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5201 MGMT_STATUS_NOT_SUPPORTED
);
5203 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5204 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5205 MGMT_STATUS_REJECTED
);
5207 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5208 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5209 MGMT_STATUS_INVALID_PARAMS
);
5213 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5214 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5218 if (!hdev_is_powered(hdev
)) {
5220 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
5221 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
5222 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
5223 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
5224 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
5227 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
5229 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5233 err
= new_settings(hdev
, sk
);
5237 /* Reject disabling when powered on */
5239 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5240 MGMT_STATUS_REJECTED
);
5243 /* When configuring a dual-mode controller to operate
5244 * with LE only and using a static address, then switching
5245 * BR/EDR back on is not allowed.
5247 * Dual-mode controllers shall operate with the public
5248 * address as its identity address for BR/EDR and LE. So
5249 * reject the attempt to create an invalid configuration.
5251 * The same restrictions applies when secure connections
5252 * has been enabled. For BR/EDR this is a controller feature
5253 * while for LE it is a host stack feature. This means that
5254 * switching BR/EDR back on when secure connections has been
5255 * enabled is not a supported transaction.
5257 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5258 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
5259 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
5260 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5261 MGMT_STATUS_REJECTED
);
5266 if (pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
5267 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5272 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
5278 /* We need to flip the bit already here so that update_adv_data
5279 * generates the correct flags.
5281 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
5283 hci_req_init(&req
, hdev
);
5285 write_fast_connectable(&req
, false);
5286 __hci_update_page_scan(&req
);
5288 /* Since only the advertising data flags will change, there
5289 * is no need to update the scan response data.
5291 update_adv_data(&req
);
5293 err
= hci_req_run(&req
, set_bredr_complete
);
5295 mgmt_pending_remove(cmd
);
5298 hci_dev_unlock(hdev
);
5302 static void sc_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5304 struct mgmt_pending_cmd
*cmd
;
5305 struct mgmt_mode
*cp
;
5307 BT_DBG("%s status %u", hdev
->name
, status
);
5311 cmd
= pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
);
5316 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5317 mgmt_status(status
));
5325 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
5326 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5329 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5330 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5333 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5334 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5338 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5339 new_settings(hdev
, cmd
->sk
);
5342 mgmt_pending_remove(cmd
);
5344 hci_dev_unlock(hdev
);
5347 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
5348 void *data
, u16 len
)
5350 struct mgmt_mode
*cp
= data
;
5351 struct mgmt_pending_cmd
*cmd
;
5352 struct hci_request req
;
5356 BT_DBG("request for %s", hdev
->name
);
5358 if (!lmp_sc_capable(hdev
) &&
5359 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5360 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5361 MGMT_STATUS_NOT_SUPPORTED
);
5363 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5364 lmp_sc_capable(hdev
) &&
5365 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
5366 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5367 MGMT_STATUS_REJECTED
);
5369 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5370 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5371 MGMT_STATUS_INVALID_PARAMS
);
5375 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
5376 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5380 changed
= !hci_dev_test_and_set_flag(hdev
,
5382 if (cp
->val
== 0x02)
5383 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5385 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5387 changed
= hci_dev_test_and_clear_flag(hdev
,
5389 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5392 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5397 err
= new_settings(hdev
, sk
);
5402 if (pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
5403 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5410 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5411 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5412 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5416 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
5422 hci_req_init(&req
, hdev
);
5423 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
5424 err
= hci_req_run(&req
, sc_enable_complete
);
5426 mgmt_pending_remove(cmd
);
5431 hci_dev_unlock(hdev
);
5435 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5436 void *data
, u16 len
)
5438 struct mgmt_mode
*cp
= data
;
5439 bool changed
, use_changed
;
5442 BT_DBG("request for %s", hdev
->name
);
5444 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5445 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
5446 MGMT_STATUS_INVALID_PARAMS
);
5451 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
5453 changed
= hci_dev_test_and_clear_flag(hdev
,
5454 HCI_KEEP_DEBUG_KEYS
);
5456 if (cp
->val
== 0x02)
5457 use_changed
= !hci_dev_test_and_set_flag(hdev
,
5458 HCI_USE_DEBUG_KEYS
);
5460 use_changed
= hci_dev_test_and_clear_flag(hdev
,
5461 HCI_USE_DEBUG_KEYS
);
5463 if (hdev_is_powered(hdev
) && use_changed
&&
5464 hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
5465 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
5466 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
5467 sizeof(mode
), &mode
);
5470 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
5475 err
= new_settings(hdev
, sk
);
5478 hci_dev_unlock(hdev
);
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                       u16 len)
{
        struct mgmt_cp_set_privacy *cp = cp_data;
        bool changed;
        int err;

        BT_DBG("request for %s", hdev->name);

        if (!lmp_le_capable(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                                       MGMT_STATUS_NOT_SUPPORTED);

        if (cp->privacy != 0x00 && cp->privacy != 0x01)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                                       MGMT_STATUS_INVALID_PARAMS);

        if (hdev_is_powered(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                                       MGMT_STATUS_REJECTED);

        hci_dev_lock(hdev);

        /* If user space supports this command it is also expected to
         * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
         */
        hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

        if (cp->privacy) {
                changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
                memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
        } else {
                changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
                memset(hdev->irk, 0, sizeof(hdev->irk));
                hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
        }

        err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
        if (err < 0)
                goto unlock;

        if (changed)
                err = new_settings(hdev, sk);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
        switch (irk->addr.type) {
        case BDADDR_LE_PUBLIC:
                return true;

        case BDADDR_LE_RANDOM:
                /* Two most significant bits shall be set */
                if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
                        return false;
                return true;
        }

        return false;
}
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                     u16 len)
{
        struct mgmt_cp_load_irks *cp = cp_data;
        const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
                                   sizeof(struct mgmt_irk_info));
        u16 irk_count, expected_len;
        int i, err;

        BT_DBG("request for %s", hdev->name);

        if (!lmp_le_capable(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
                                       MGMT_STATUS_NOT_SUPPORTED);

        irk_count = __le16_to_cpu(cp->irk_count);
        if (irk_count > max_irk_count) {
                BT_ERR("load_irks: too big irk_count value %u", irk_count);
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
                                       MGMT_STATUS_INVALID_PARAMS);
        }

        expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
        if (expected_len != len) {
                BT_ERR("load_irks: expected %u bytes, got %u bytes",
                       expected_len, len);
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
                                       MGMT_STATUS_INVALID_PARAMS);
        }

        BT_DBG("%s irk_count %u", hdev->name, irk_count);

        for (i = 0; i < irk_count; i++) {
                struct mgmt_irk_info *key = &cp->irks[i];

                if (!irk_is_valid(key))
                        return mgmt_cmd_status(sk, hdev->id,
                                               MGMT_OP_LOAD_IRKS,
                                               MGMT_STATUS_INVALID_PARAMS);
        }

        hci_dev_lock(hdev);

        hci_smp_irks_clear(hdev);

        for (i = 0; i < irk_count; i++) {
                struct mgmt_irk_info *irk = &cp->irks[i];

                hci_add_irk(hdev, &irk->addr.bdaddr,
                            le_addr_type(irk->addr.type), irk->val,
                            BDADDR_ANY);
        }

        hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

        hci_dev_unlock(hdev);

        return err;
}
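/* Illustrative sketch (not part of mgmt.c): Load IRKs, like the other
 * variable-length Load_* commands above and below, is validated as a fixed
 * header followed by irk_count records, so the payload length must equal
 * sizeof(header) + irk_count * sizeof(record). A stand-alone version of
 * that arithmetic is shown here; the struct layouts are local stand-ins for
 * the mgmt wire format, not the kernel definitions, and the block is
 * compiled out.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct irk_record { uint8_t addr[7]; uint8_t val[16]; };   /* 23 bytes */
struct load_irks_hdr { uint16_t irk_count; };              /* 2 bytes  */

static bool load_irks_len_ok(size_t payload_len, uint16_t irk_count)
{
        size_t expected = sizeof(struct load_irks_hdr) +
                          (size_t)irk_count * sizeof(struct irk_record);

        return payload_len == expected;
}
#endif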
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
        if (key->master != 0x00 && key->master != 0x01)
                return false;

        switch (key->addr.type) {
        case BDADDR_LE_PUBLIC:
                return true;

        case BDADDR_LE_RANDOM:
                /* Two most significant bits shall be set */
                if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
                        return false;
                return true;
        }

        return false;
}
5629 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5630 void *cp_data
, u16 len
)
5632 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
5633 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
5634 sizeof(struct mgmt_ltk_info
));
5635 u16 key_count
, expected_len
;
5638 BT_DBG("request for %s", hdev
->name
);
5640 if (!lmp_le_capable(hdev
))
5641 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5642 MGMT_STATUS_NOT_SUPPORTED
);
5644 key_count
= __le16_to_cpu(cp
->key_count
);
5645 if (key_count
> max_key_count
) {
5646 BT_ERR("load_ltks: too big key_count value %u", key_count
);
5647 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5648 MGMT_STATUS_INVALID_PARAMS
);
5651 expected_len
= sizeof(*cp
) + key_count
*
5652 sizeof(struct mgmt_ltk_info
);
5653 if (expected_len
!= len
) {
5654 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5656 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5657 MGMT_STATUS_INVALID_PARAMS
);
5660 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
5662 for (i
= 0; i
< key_count
; i
++) {
5663 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5665 if (!ltk_is_valid(key
))
5666 return mgmt_cmd_status(sk
, hdev
->id
,
5667 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5668 MGMT_STATUS_INVALID_PARAMS
);
5673 hci_smp_ltks_clear(hdev
);
5675 for (i
= 0; i
< key_count
; i
++) {
5676 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5677 u8 type
, authenticated
;
5679 switch (key
->type
) {
5680 case MGMT_LTK_UNAUTHENTICATED
:
5681 authenticated
= 0x00;
5682 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5684 case MGMT_LTK_AUTHENTICATED
:
5685 authenticated
= 0x01;
5686 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5688 case MGMT_LTK_P256_UNAUTH
:
5689 authenticated
= 0x00;
5690 type
= SMP_LTK_P256
;
5692 case MGMT_LTK_P256_AUTH
:
5693 authenticated
= 0x01;
5694 type
= SMP_LTK_P256
;
5696 case MGMT_LTK_P256_DEBUG
:
5697 authenticated
= 0x00;
5698 type
= SMP_LTK_P256_DEBUG
;
5703 hci_add_ltk(hdev
, &key
->addr
.bdaddr
,
5704 le_addr_type(key
->addr
.type
), type
, authenticated
,
5705 key
->val
, key
->enc_size
, key
->ediv
, key
->rand
);
5708 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
5711 hci_dev_unlock(hdev
);
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
        struct hci_conn *conn = cmd->user_data;
        struct mgmt_rp_get_conn_info rp;
        int err;

        memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

        if (status == MGMT_STATUS_SUCCESS) {
                rp.rssi = conn->rssi;
                rp.tx_power = conn->tx_power;
                rp.max_tx_power = conn->max_tx_power;
        } else {
                rp.rssi = HCI_RSSI_INVALID;
                rp.tx_power = HCI_TX_POWER_INVALID;
                rp.max_tx_power = HCI_TX_POWER_INVALID;
        }

        err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
                                status, &rp, sizeof(rp));

        hci_conn_drop(conn);
        hci_conn_put(conn);

        return err;
}
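/* Illustrative sketch (not part of mgmt.c): when the controller query fails,
 * the reply above carries the HCI_RSSI_INVALID / HCI_TX_POWER_INVALID
 * sentinels instead of measurements, so a consumer of the Get Connection
 * Information reply should treat those sentinels as "value not available".
 * The struct below is a local stand-in for the reply fields, not the kernel
 * definition, and the block is compiled out.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct conn_info_reply {
        int8_t rssi;            /* sentinel when unknown */
        int8_t tx_power;        /* sentinel when unknown */
        int8_t max_tx_power;    /* sentinel when unknown */
};

static void print_conn_info(const struct conn_info_reply *rp,
                            int8_t rssi_invalid, int8_t tx_invalid)
{
        if (rp->rssi != rssi_invalid)
                printf("RSSI: %d dBm\n", rp->rssi);
        if (rp->tx_power != tx_invalid)
                printf("TX power: %d dBm\n", rp->tx_power);
        if (rp->max_tx_power != tx_invalid)
                printf("Max TX power: %d dBm\n", rp->max_tx_power);
}
#endif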
5743 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
,
5746 struct hci_cp_read_rssi
*cp
;
5747 struct mgmt_pending_cmd
*cmd
;
5748 struct hci_conn
*conn
;
5752 BT_DBG("status 0x%02x", hci_status
);
5756 /* Commands sent in request are either Read RSSI or Read Transmit Power
5757 * Level so we check which one was last sent to retrieve connection
5758 * handle. Both commands have handle as first parameter so it's safe to
5759 * cast data on the same command struct.
5761 * First command sent is always Read RSSI and we fail only if it fails.
5762 * In other case we simply override error to indicate success as we
5763 * already remembered if TX power value is actually valid.
5765 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
5767 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
5768 status
= MGMT_STATUS_SUCCESS
;
5770 status
= mgmt_status(hci_status
);
5774 BT_ERR("invalid sent_cmd in conn_info response");
5778 handle
= __le16_to_cpu(cp
->handle
);
5779 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5781 BT_ERR("unknown handle (%d) in conn_info response", handle
);
5785 cmd
= pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
5789 cmd
->cmd_complete(cmd
, status
);
5790 mgmt_pending_remove(cmd
);
5793 hci_dev_unlock(hdev
);
5796 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5799 struct mgmt_cp_get_conn_info
*cp
= data
;
5800 struct mgmt_rp_get_conn_info rp
;
5801 struct hci_conn
*conn
;
5802 unsigned long conn_info_age
;
5805 BT_DBG("%s", hdev
->name
);
5807 memset(&rp
, 0, sizeof(rp
));
5808 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5809 rp
.addr
.type
= cp
->addr
.type
;
5811 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5812 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5813 MGMT_STATUS_INVALID_PARAMS
,
5818 if (!hdev_is_powered(hdev
)) {
5819 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5820 MGMT_STATUS_NOT_POWERED
, &rp
,
5825 if (cp
->addr
.type
== BDADDR_BREDR
)
5826 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5829 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
5831 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5832 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5833 MGMT_STATUS_NOT_CONNECTED
, &rp
,
5838 if (pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
5839 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5840 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
5844 /* To avoid client trying to guess when to poll again for information we
5845 * calculate conn info age as random value between min/max set in hdev.
5847 conn_info_age
= hdev
->conn_info_min_age
+
5848 prandom_u32_max(hdev
->conn_info_max_age
-
5849 hdev
->conn_info_min_age
);
5851 /* Query controller to refresh cached values if they are too old or were
5854 if (time_after(jiffies
, conn
->conn_info_timestamp
+
5855 msecs_to_jiffies(conn_info_age
)) ||
5856 !conn
->conn_info_timestamp
) {
5857 struct hci_request req
;
5858 struct hci_cp_read_tx_power req_txp_cp
;
5859 struct hci_cp_read_rssi req_rssi_cp
;
5860 struct mgmt_pending_cmd
*cmd
;
5862 hci_req_init(&req
, hdev
);
5863 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
5864 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
5867 /* For LE links TX power does not change thus we don't need to
5868 * query for it once value is known.
5870 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5871 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
5872 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5873 req_txp_cp
.type
= 0x00;
5874 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5875 sizeof(req_txp_cp
), &req_txp_cp
);
5878 /* Max TX power needs to be read only once per connection */
5879 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5880 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5881 req_txp_cp
.type
= 0x01;
5882 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5883 sizeof(req_txp_cp
), &req_txp_cp
);
5886 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5890 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5897 hci_conn_hold(conn
);
5898 cmd
->user_data
= hci_conn_get(conn
);
5899 cmd
->cmd_complete
= conn_info_cmd_complete
;
5901 conn
->conn_info_timestamp
= jiffies
;
5903 /* Cache is valid, just reply with values cached in hci_conn */
5904 rp
.rssi
= conn
->rssi
;
5905 rp
.tx_power
= conn
->tx_power
;
5906 rp
.max_tx_power
= conn
->max_tx_power
;
5908 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5909 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5913 hci_dev_unlock(hdev
);
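/* Illustrative sketch (not part of mgmt.c): get_conn_info() above refreshes
 * the cached RSSI/TX power values only when the cache is older than a random
 * age drawn between hdev->conn_info_min_age and hdev->conn_info_max_age, so
 * a client cannot predict exactly when its polling will trigger a new
 * controller query. A stand-alone version of that decision might look like
 * this; the names and the time source are assumptions, and the block is
 * compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

static bool cache_needs_refresh(time_t last_update, time_t now,
                                unsigned int min_age_ms,
                                unsigned int max_age_ms)
{
        /* Random age in [min_age_ms, max_age_ms), mirroring the kernel's
         * min + prandom_u32_max(max - min).
         */
        unsigned int span = max_age_ms > min_age_ms ?
                            max_age_ms - min_age_ms : 1;
        unsigned int age_ms = min_age_ms + (unsigned int)(rand() % span);

        return last_update == 0 ||
               (now - last_update) * 1000 >= (time_t)age_ms;
}
#endif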
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
        struct hci_conn *conn = cmd->user_data;
        struct mgmt_rp_get_clock_info rp;
        struct hci_dev *hdev;
        int err;

        memset(&rp, 0, sizeof(rp));
        memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

        if (status)
                goto complete;

        hdev = hci_dev_get(cmd->index);
        if (hdev) {
                rp.local_clock = cpu_to_le32(hdev->clock);
                hci_dev_put(hdev);
        }

        if (conn) {
                rp.piconet_clock = cpu_to_le32(conn->clock);
                rp.accuracy = cpu_to_le16(conn->clock_accuracy);
        }

complete:
        err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
                                sizeof(rp));

        if (conn) {
                hci_conn_drop(conn);
                hci_conn_put(conn);
        }

        return err;
}
5953 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5955 struct hci_cp_read_clock
*hci_cp
;
5956 struct mgmt_pending_cmd
*cmd
;
5957 struct hci_conn
*conn
;
5959 BT_DBG("%s status %u", hdev
->name
, status
);
5963 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5967 if (hci_cp
->which
) {
5968 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5969 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5974 cmd
= pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5978 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5979 mgmt_pending_remove(cmd
);
5982 hci_dev_unlock(hdev
);
5985 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5988 struct mgmt_cp_get_clock_info
*cp
= data
;
5989 struct mgmt_rp_get_clock_info rp
;
5990 struct hci_cp_read_clock hci_cp
;
5991 struct mgmt_pending_cmd
*cmd
;
5992 struct hci_request req
;
5993 struct hci_conn
*conn
;
5996 BT_DBG("%s", hdev
->name
);
5998 memset(&rp
, 0, sizeof(rp
));
5999 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
6000 rp
.addr
.type
= cp
->addr
.type
;
6002 if (cp
->addr
.type
!= BDADDR_BREDR
)
6003 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
6004 MGMT_STATUS_INVALID_PARAMS
,
6009 if (!hdev_is_powered(hdev
)) {
6010 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
6011 MGMT_STATUS_NOT_POWERED
, &rp
,
6016 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6017 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
6019 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
6020 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6021 MGMT_OP_GET_CLOCK_INFO
,
6022 MGMT_STATUS_NOT_CONNECTED
,
6030 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
6036 cmd
->cmd_complete
= clock_info_cmd_complete
;
6038 hci_req_init(&req
, hdev
);
6040 memset(&hci_cp
, 0, sizeof(hci_cp
));
6041 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
6044 hci_conn_hold(conn
);
6045 cmd
->user_data
= hci_conn_get(conn
);
6047 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
6048 hci_cp
.which
= 0x01; /* Piconet clock */
6049 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
6052 err
= hci_req_run(&req
, get_clock_info_complete
);
6054 mgmt_pending_remove(cmd
);
6057 hci_dev_unlock(hdev
);
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
        struct hci_conn *conn;

        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
        if (!conn)
                return false;

        if (conn->dst_type != type)
                return false;

        if (conn->state != BT_CONNECTED)
                return false;

        return true;
}
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
                               u8 addr_type, u8 auto_connect)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (!params)
                return -EIO;

        if (params->auto_connect == auto_connect)
                return 0;

        list_del_init(&params->action);

        switch (auto_connect) {
        case HCI_AUTO_CONN_DISABLED:
        case HCI_AUTO_CONN_LINK_LOSS:
                /* If auto connect is being disabled when we're trying to
                 * connect to device, keep connecting.
                 */
                if (params->explicit_connect)
                        list_add(&params->action, &hdev->pend_le_conns);

                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_REPORT:
                if (params->explicit_connect)
                        list_add(&params->action, &hdev->pend_le_conns);
                else
                        list_add(&params->action, &hdev->pend_le_reports);
                __hci_update_background_scan(req);
                break;
        case HCI_AUTO_CONN_DIRECT:
        case HCI_AUTO_CONN_ALWAYS:
                if (!is_connected(hdev, addr, addr_type)) {
                        list_add(&params->action, &hdev->pend_le_conns);
                        /* If we are in scan phase of connecting, we were
                         * already added to pend_le_conns and scanning.
                         */
                        if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
                                __hci_update_background_scan(req);
                }
                break;
        }

        params->auto_connect = auto_connect;

        BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
               auto_connect);

        return 0;
}
static void device_added(struct sock *sk, struct hci_dev *hdev,
                         bdaddr_t *bdaddr, u8 type, u8 action)
{
        struct mgmt_ev_device_added ev;

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = type;
        ev.action = action;

        mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
        if (!cmd)
                goto unlock;

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
6164 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
6165 void *data
, u16 len
)
6167 struct mgmt_cp_add_device
*cp
= data
;
6168 struct mgmt_pending_cmd
*cmd
;
6169 struct hci_request req
;
6170 u8 auto_conn
, addr_type
;
6173 BT_DBG("%s", hdev
->name
);
6175 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
6176 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
6177 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6178 MGMT_STATUS_INVALID_PARAMS
,
6179 &cp
->addr
, sizeof(cp
->addr
));
6181 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
6182 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6183 MGMT_STATUS_INVALID_PARAMS
,
6184 &cp
->addr
, sizeof(cp
->addr
));
6186 hci_req_init(&req
, hdev
);
6190 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_DEVICE
, hdev
, data
, len
);
6196 cmd
->cmd_complete
= addr_cmd_complete
;
6198 if (cp
->addr
.type
== BDADDR_BREDR
) {
6199 /* Only incoming connections action is supported for now */
6200 if (cp
->action
!= 0x01) {
6201 err
= cmd
->cmd_complete(cmd
,
6202 MGMT_STATUS_INVALID_PARAMS
);
6203 mgmt_pending_remove(cmd
);
6207 err
= hci_bdaddr_list_add(&hdev
->whitelist
, &cp
->addr
.bdaddr
,
6212 __hci_update_page_scan(&req
);
6217 addr_type
= le_addr_type(cp
->addr
.type
);
6219 if (cp
->action
== 0x02)
6220 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
6221 else if (cp
->action
== 0x01)
6222 auto_conn
= HCI_AUTO_CONN_DIRECT
;
6224 auto_conn
= HCI_AUTO_CONN_REPORT
;
6226 /* Kernel internally uses conn_params with resolvable private
6227 * address, but Add Device allows only identity addresses.
6228 * Make sure it is enforced before calling
6229 * hci_conn_params_lookup.
6231 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
6232 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_INVALID_PARAMS
);
6233 mgmt_pending_remove(cmd
);
6237 /* If the connection parameters don't exist for this device,
6238 * they will be created and configured with defaults.
6240 if (hci_conn_params_set(&req
, &cp
->addr
.bdaddr
, addr_type
,
6242 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_FAILED
);
6243 mgmt_pending_remove(cmd
);
6248 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
6250 err
= hci_req_run(&req
, add_device_complete
);
6252 /* ENODATA means no HCI commands were needed (e.g. if
6253 * the adapter is powered off).
6255 if (err
== -ENODATA
)
6256 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_SUCCESS
);
6257 mgmt_pending_remove(cmd
);
6261 hci_dev_unlock(hdev
);
static void device_removed(struct sock *sk, struct hci_dev *hdev,
                           bdaddr_t *bdaddr, u8 type)
{
        struct mgmt_ev_device_removed ev;

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = type;

        mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
        if (!cmd)
                goto unlock;

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
6295 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
6296 void *data
, u16 len
)
6298 struct mgmt_cp_remove_device
*cp
= data
;
6299 struct mgmt_pending_cmd
*cmd
;
6300 struct hci_request req
;
6303 BT_DBG("%s", hdev
->name
);
6305 hci_req_init(&req
, hdev
);
6309 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_DEVICE
, hdev
, data
, len
);
6315 cmd
->cmd_complete
= addr_cmd_complete
;
6317 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6318 struct hci_conn_params
*params
;
6321 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
6322 err
= cmd
->cmd_complete(cmd
,
6323 MGMT_STATUS_INVALID_PARAMS
);
6324 mgmt_pending_remove(cmd
);
6328 if (cp
->addr
.type
== BDADDR_BREDR
) {
6329 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
6333 err
= cmd
->cmd_complete(cmd
,
6334 MGMT_STATUS_INVALID_PARAMS
);
6335 mgmt_pending_remove(cmd
);
6339 __hci_update_page_scan(&req
);
6341 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
6346 addr_type
= le_addr_type(cp
->addr
.type
);
6348 /* Kernel internally uses conn_params with resolvable private
6349 * address, but Remove Device allows only identity addresses.
6350 * Make sure it is enforced before calling
6351 * hci_conn_params_lookup.
6353 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
6354 err
= cmd
->cmd_complete(cmd
,
6355 MGMT_STATUS_INVALID_PARAMS
);
6356 mgmt_pending_remove(cmd
);
6360 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
6363 err
= cmd
->cmd_complete(cmd
,
6364 MGMT_STATUS_INVALID_PARAMS
);
6365 mgmt_pending_remove(cmd
);
6369 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
||
6370 params
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
) {
6371 err
= cmd
->cmd_complete(cmd
,
6372 MGMT_STATUS_INVALID_PARAMS
);
6373 mgmt_pending_remove(cmd
);
6377 list_del(¶ms
->action
);
6378 list_del(¶ms
->list
);
6380 __hci_update_background_scan(&req
);
6382 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
6384 struct hci_conn_params
*p
, *tmp
;
6385 struct bdaddr_list
*b
, *btmp
;
6387 if (cp
->addr
.type
) {
6388 err
= cmd
->cmd_complete(cmd
,
6389 MGMT_STATUS_INVALID_PARAMS
);
6390 mgmt_pending_remove(cmd
);
6394 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
6395 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
6400 __hci_update_page_scan(&req
);
6402 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
6403 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
6405 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
6406 if (p
->explicit_connect
) {
6407 p
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
6410 list_del(&p
->action
);
6415 BT_DBG("All LE connection parameters were removed");
6417 __hci_update_background_scan(&req
);
6421 err
= hci_req_run(&req
, remove_device_complete
);
6423 /* ENODATA means no HCI commands were needed (e.g. if
6424 * the adapter is powered off).
6426 if (err
== -ENODATA
)
6427 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_SUCCESS
);
6428 mgmt_pending_remove(cmd
);
6432 hci_dev_unlock(hdev
);
6436 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6439 struct mgmt_cp_load_conn_param
*cp
= data
;
6440 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
6441 sizeof(struct mgmt_conn_param
));
6442 u16 param_count
, expected_len
;
6445 if (!lmp_le_capable(hdev
))
6446 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6447 MGMT_STATUS_NOT_SUPPORTED
);
6449 param_count
= __le16_to_cpu(cp
->param_count
);
6450 if (param_count
> max_param_count
) {
6451 BT_ERR("load_conn_param: too big param_count value %u",
6453 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6454 MGMT_STATUS_INVALID_PARAMS
);
6457 expected_len
= sizeof(*cp
) + param_count
*
6458 sizeof(struct mgmt_conn_param
);
6459 if (expected_len
!= len
) {
6460 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6462 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6463 MGMT_STATUS_INVALID_PARAMS
);
6466 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
6470 hci_conn_params_clear_disabled(hdev
);
6472 for (i
= 0; i
< param_count
; i
++) {
6473 struct mgmt_conn_param
*param
= &cp
->params
[i
];
6474 struct hci_conn_params
*hci_param
;
6475 u16 min
, max
, latency
, timeout
;
6478 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
6481 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
6482 addr_type
= ADDR_LE_DEV_PUBLIC
;
6483 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
6484 addr_type
= ADDR_LE_DEV_RANDOM
;
6486 BT_ERR("Ignoring invalid connection parameters");
6490 min
= le16_to_cpu(param
->min_interval
);
6491 max
= le16_to_cpu(param
->max_interval
);
6492 latency
= le16_to_cpu(param
->latency
);
6493 timeout
= le16_to_cpu(param
->timeout
);
6495 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6496 min
, max
, latency
, timeout
);
6498 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
6499 BT_ERR("Ignoring invalid connection parameters");
6503 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
6506 BT_ERR("Failed to add connection parameters");
6510 hci_param
->conn_min_interval
= min
;
6511 hci_param
->conn_max_interval
= max
;
6512 hci_param
->conn_latency
= latency
;
6513 hci_param
->supervision_timeout
= timeout
;
6516 hci_dev_unlock(hdev
);
6518 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0,
6522 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
6523 void *data
, u16 len
)
6525 struct mgmt_cp_set_external_config
*cp
= data
;
6529 BT_DBG("%s", hdev
->name
);
6531 if (hdev_is_powered(hdev
))
6532 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6533 MGMT_STATUS_REJECTED
);
6535 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
6536 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6537 MGMT_STATUS_INVALID_PARAMS
);
6539 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
6540 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6541 MGMT_STATUS_NOT_SUPPORTED
);
6546 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_EXT_CONFIGURED
);
6548 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_EXT_CONFIGURED
);
6550 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
6557 err
= new_options(hdev
, sk
);
6559 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) == is_configured(hdev
)) {
6560 mgmt_index_removed(hdev
);
6562 if (hci_dev_test_and_change_flag(hdev
, HCI_UNCONFIGURED
)) {
6563 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6564 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6566 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6568 set_bit(HCI_RAW
, &hdev
->flags
);
6569 mgmt_index_added(hdev
);
6574 hci_dev_unlock(hdev
);
6578 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
6579 void *data
, u16 len
)
6581 struct mgmt_cp_set_public_address
*cp
= data
;
6585 BT_DBG("%s", hdev
->name
);
6587 if (hdev_is_powered(hdev
))
6588 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6589 MGMT_STATUS_REJECTED
);
6591 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
6592 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6593 MGMT_STATUS_INVALID_PARAMS
);
6595 if (!hdev
->set_bdaddr
)
6596 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6597 MGMT_STATUS_NOT_SUPPORTED
);
6601 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
6602 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
6604 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
6611 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
6612 err
= new_options(hdev
, sk
);
6614 if (is_configured(hdev
)) {
6615 mgmt_index_removed(hdev
);
6617 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
6619 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6620 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6622 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6626 hci_dev_unlock(hdev
);
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
                                  u8 data_len)
{
        eir[eir_len++] = sizeof(type) + data_len;
        eir[eir_len++] = type;
        memcpy(&eir[eir_len], data, data_len);
        eir_len += data_len;

        return eir_len;
}
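/* Illustrative sketch (not part of mgmt.c): eir_append_data() above emits a
 * standard EIR/AD structure, i.e. a length octet covering the type octet
 * plus the payload, followed by the type octet and the payload itself.
 * Appending a 3-byte Class of Device field, for example, produces five
 * octets: 0x04, <type>, <cod0>, <cod1>, <cod2>. The stand-alone copy below
 * mirrors that layout and is compiled out.
 */
#if 0
#include <stdint.h>
#include <string.h>

static uint16_t ad_append(uint8_t *buf, uint16_t len, uint8_t type,
                          const uint8_t *data, uint8_t data_len)
{
        buf[len++] = 1 + data_len;      /* length: type octet + payload */
        buf[len++] = type;
        memcpy(&buf[len], data, data_len);
        return len + data_len;
}
#endif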
6641 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
6642 u16 opcode
, struct sk_buff
*skb
)
6644 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
6645 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
6646 u8
*h192
, *r192
, *h256
, *r256
;
6647 struct mgmt_pending_cmd
*cmd
;
6651 BT_DBG("%s status %u", hdev
->name
, status
);
6653 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
6657 mgmt_cp
= cmd
->param
;
6660 status
= mgmt_status(status
);
6667 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
6668 struct hci_rp_read_local_oob_data
*rp
;
6670 if (skb
->len
!= sizeof(*rp
)) {
6671 status
= MGMT_STATUS_FAILED
;
6674 status
= MGMT_STATUS_SUCCESS
;
6675 rp
= (void *)skb
->data
;
6677 eir_len
= 5 + 18 + 18;
6684 struct hci_rp_read_local_oob_ext_data
*rp
;
6686 if (skb
->len
!= sizeof(*rp
)) {
6687 status
= MGMT_STATUS_FAILED
;
6690 status
= MGMT_STATUS_SUCCESS
;
6691 rp
= (void *)skb
->data
;
6693 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
6694 eir_len
= 5 + 18 + 18;
6698 eir_len
= 5 + 18 + 18 + 18 + 18;
6708 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
6715 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
6716 hdev
->dev_class
, 3);
6719 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6720 EIR_SSP_HASH_C192
, h192
, 16);
6721 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6722 EIR_SSP_RAND_R192
, r192
, 16);
6726 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6727 EIR_SSP_HASH_C256
, h256
, 16);
6728 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6729 EIR_SSP_RAND_R256
, r256
, 16);
6733 mgmt_rp
->type
= mgmt_cp
->type
;
6734 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
6736 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
6737 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
6738 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
6739 if (err
< 0 || status
)
6742 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6744 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6745 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
6746 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
6749 mgmt_pending_remove(cmd
);
6752 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
6753 struct mgmt_cp_read_local_oob_ext_data
*cp
)
6755 struct mgmt_pending_cmd
*cmd
;
6756 struct hci_request req
;
6759 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
6764 hci_req_init(&req
, hdev
);
6766 if (bredr_sc_enabled(hdev
))
6767 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
6769 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
6771 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
6773 mgmt_pending_remove(cmd
);
6780 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
6781 void *data
, u16 data_len
)
6783 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
6784 struct mgmt_rp_read_local_oob_ext_data
*rp
;
6787 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
6790 BT_DBG("%s", hdev
->name
);
6792 if (hdev_is_powered(hdev
)) {
6794 case BIT(BDADDR_BREDR
):
6795 status
= mgmt_bredr_support(hdev
);
6801 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6802 status
= mgmt_le_support(hdev
);
6806 eir_len
= 9 + 3 + 18 + 18 + 3;
6809 status
= MGMT_STATUS_INVALID_PARAMS
;
6814 status
= MGMT_STATUS_NOT_POWERED
;
6818 rp_len
= sizeof(*rp
) + eir_len
;
6819 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6830 case BIT(BDADDR_BREDR
):
6831 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
6832 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
6833 hci_dev_unlock(hdev
);
6837 status
= MGMT_STATUS_FAILED
;
6840 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6842 hdev
->dev_class
, 3);
6845 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6846 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
6847 smp_generate_oob(hdev
, hash
, rand
) < 0) {
6848 hci_dev_unlock(hdev
);
6849 status
= MGMT_STATUS_FAILED
;
6853 /* This should return the active RPA, but since the RPA
6854 * is only programmed on demand, it is really hard to fill
6855 * this in at the moment. For now disallow retrieving
6856 * local out-of-band data when privacy is in use.
6858 * Returning the identity address will not help here since
6859 * pairing happens before the identity resolving key is
6860 * known and thus the connection establishment happens
6861 * based on the RPA and not the identity address.
6863 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
6864 hci_dev_unlock(hdev
);
6865 status
= MGMT_STATUS_REJECTED
;
6869 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
6870 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
6871 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
6872 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
6873 memcpy(addr
, &hdev
->static_addr
, 6);
6876 memcpy(addr
, &hdev
->bdaddr
, 6);
6880 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
6881 addr
, sizeof(addr
));
6883 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
6888 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
6889 &role
, sizeof(role
));
6891 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
6892 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6894 hash
, sizeof(hash
));
6896 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6898 rand
, sizeof(rand
));
6901 flags
= get_adv_discov_flags(hdev
);
6903 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
6904 flags
|= LE_AD_NO_BREDR
;
6906 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
6907 &flags
, sizeof(flags
));
6911 hci_dev_unlock(hdev
);
6913 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6915 status
= MGMT_STATUS_SUCCESS
;
6918 rp
->type
= cp
->type
;
6919 rp
->eir_len
= cpu_to_le16(eir_len
);
6921 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
6922 status
, rp
, sizeof(*rp
) + eir_len
);
6923 if (err
< 0 || status
)
6926 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6927 rp
, sizeof(*rp
) + eir_len
,
6928 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
        u32 flags = 0;

        flags |= MGMT_ADV_FLAG_CONNECTABLE;
        flags |= MGMT_ADV_FLAG_DISCOV;
        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
        flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
                flags |= MGMT_ADV_FLAG_TX_POWER;

        return flags;
}
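/* Illustrative sketch (not part of mgmt.c): the mask built above is what
 * Read Advertising Features reports to user space, and add_advertising()
 * below rejects any request whose flags fall outside of it
 * (flags & ~supported_flags). A caller-side check mirroring that rule might
 * look like the following; the function name is hypothetical and the block
 * is compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool adv_flags_supported(uint32_t requested, uint32_t supported)
{
        /* Every requested bit must also be present in the supported mask. */
        return (requested & ~supported) == 0;
}
#endif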
6951 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
6952 void *data
, u16 data_len
)
6954 struct mgmt_rp_read_adv_features
*rp
;
6958 struct adv_info
*adv_instance
;
6959 u32 supported_flags
;
6961 BT_DBG("%s", hdev
->name
);
6963 if (!lmp_le_capable(hdev
))
6964 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6965 MGMT_STATUS_REJECTED
);
6969 rp_len
= sizeof(*rp
);
6971 instance
= hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6973 rp_len
+= hdev
->adv_instance_cnt
;
6975 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6977 hci_dev_unlock(hdev
);
6981 supported_flags
= get_supported_adv_flags(hdev
);
6983 rp
->supported_flags
= cpu_to_le32(supported_flags
);
6984 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
6985 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
6986 rp
->max_instances
= HCI_MAX_ADV_INSTANCES
;
6990 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
6991 if (i
>= hdev
->adv_instance_cnt
)
6994 rp
->instance
[i
] = adv_instance
->instance
;
6997 rp
->num_instances
= hdev
->adv_instance_cnt
;
6999 rp
->num_instances
= 0;
7002 hci_dev_unlock(hdev
);
7004 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
7005 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
                              u8 len, bool is_adv_data)
{
        u8 max_len = HCI_MAX_AD_LENGTH;
        int i, cur_len;
        bool flags_managed = false;
        bool tx_power_managed = false;
        u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
                           MGMT_ADV_FLAG_MANAGED_FLAGS;

        if (is_adv_data && (adv_flags & flags_params)) {
                flags_managed = true;
                max_len -= 3;
        }

        if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
                tx_power_managed = true;
                max_len -= 3;
        }

        if (len > max_len)
                return false;

        /* Make sure that the data is correctly formatted. */
        for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
                cur_len = data[i];

                if (flags_managed && data[i + 1] == EIR_FLAGS)
                        return false;

                if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
                        return false;

                /* If the current field length would exceed the total data
                 * length, then it's invalid.
                 */
                if (i + cur_len >= len)
                        return false;
        }

        return true;
}
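/* Illustrative sketch (not part of mgmt.c): tlv_data_is_valid() above walks
 * advertising data as a sequence of length-prefixed AD structures and
 * rejects any field whose declared length would run past the end of the
 * buffer. A similar walk in isolation, without the managed-flags checks and
 * treating a zero length octet as the end of the significant part, might
 * look like this; it is compiled out.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool ad_is_well_formed(const uint8_t *data, uint8_t len)
{
        uint8_t i = 0;

        while (i < len) {
                uint8_t field_len = data[i];

                if (field_len == 0)
                        break;  /* early terminator / padding */

                /* A field is its length octet plus field_len octets. */
                if (i + field_len >= len)
                        return false;

                i += field_len + 1;
        }

        return true;
}
#endif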
7055 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7058 struct mgmt_pending_cmd
*cmd
;
7059 struct mgmt_cp_add_advertising
*cp
;
7060 struct mgmt_rp_add_advertising rp
;
7061 struct adv_info
*adv_instance
, *n
;
7064 BT_DBG("status %d", status
);
7068 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
7071 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
7073 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
7074 if (!adv_instance
->pending
)
7078 adv_instance
->pending
= false;
7082 instance
= adv_instance
->instance
;
7084 if (hdev
->cur_adv_instance
== instance
)
7085 cancel_adv_timeout(hdev
);
7087 hci_remove_adv_instance(hdev
, instance
);
7088 advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, instance
);
7095 rp
.instance
= cp
->instance
;
7098 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7099 mgmt_status(status
));
7101 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7102 mgmt_status(status
), &rp
, sizeof(rp
));
7104 mgmt_pending_remove(cmd
);
7107 hci_dev_unlock(hdev
);
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
{
        u8 instance;
        struct hci_request req;

        hdev->adv_instance_timeout = 0;

        instance = get_current_adv_instance(hdev);
        if (instance == 0x00)
                return;

        hci_dev_lock(hdev);
        hci_req_init(&req, hdev);

        clear_adv_instance(hdev, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                disable_advertising(&req);

        if (!skb_queue_empty(&req.cmd_q))
                hci_req_run(&req, NULL);

        hci_dev_unlock(hdev);
}
7135 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7136 void *data
, u16 data_len
)
7138 struct mgmt_cp_add_advertising
*cp
= data
;
7139 struct mgmt_rp_add_advertising rp
;
7141 u32 supported_flags
;
7143 u16 timeout
, duration
;
7144 unsigned int prev_instance_cnt
= hdev
->adv_instance_cnt
;
7145 u8 schedule_instance
= 0;
7146 struct adv_info
*next_instance
;
7148 struct mgmt_pending_cmd
*cmd
;
7149 struct hci_request req
;
7151 BT_DBG("%s", hdev
->name
);
7153 status
= mgmt_le_support(hdev
);
7155 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7158 if (data_len
!= sizeof(*cp
) + cp
->adv_data_len
+ cp
->scan_rsp_len
)
7159 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7160 MGMT_STATUS_INVALID_PARAMS
);
7162 flags
= __le32_to_cpu(cp
->flags
);
7163 timeout
= __le16_to_cpu(cp
->timeout
);
7164 duration
= __le16_to_cpu(cp
->duration
);
7166 /* The current implementation only supports a subset of the specified
7169 supported_flags
= get_supported_adv_flags(hdev
);
7170 if (flags
& ~supported_flags
)
7171 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7172 MGMT_STATUS_INVALID_PARAMS
);
7176 if (timeout
&& !hdev_is_powered(hdev
)) {
7177 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7178 MGMT_STATUS_REJECTED
);
7182 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7183 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7184 pending_find(MGMT_OP_SET_LE
, hdev
)) {
7185 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7190 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
7191 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
7192 cp
->scan_rsp_len
, false)) {
7193 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7194 MGMT_STATUS_INVALID_PARAMS
);
7198 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
7199 cp
->adv_data_len
, cp
->data
,
7201 cp
->data
+ cp
->adv_data_len
,
7204 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7205 MGMT_STATUS_FAILED
);
7209 /* Only trigger an advertising added event if a new instance was
7212 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
7213 advertising_added(sk
, hdev
, cp
->instance
);
7215 hci_dev_set_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
7217 if (hdev
->cur_adv_instance
== cp
->instance
) {
7218 /* If the currently advertised instance is being changed then
7219 * cancel the current advertising and schedule the next
7220 * instance. If there is only one instance then the overridden
7221 * advertising data will be visible right away.
7223 cancel_adv_timeout(hdev
);
7225 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
7227 schedule_instance
= next_instance
->instance
;
7228 } else if (!hdev
->adv_instance_timeout
) {
7229 /* Immediately advertise the new instance if no other
7230 * instance is currently being advertised.
7232 schedule_instance
= cp
->instance
;
7235 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7236 * there is no instance to be advertised then we have no HCI
7237 * communication to make. Simply return.
7239 if (!hdev_is_powered(hdev
) ||
7240 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
7241 !schedule_instance
) {
7242 rp
.instance
= cp
->instance
;
7243 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7244 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7248 /* We're good to go, update advertising data, parameters, and start
7251 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
7258 hci_req_init(&req
, hdev
);
7260 err
= schedule_adv_instance(&req
, schedule_instance
, true);
7263 err
= hci_req_run(&req
, add_advertising_complete
);
7266 mgmt_pending_remove(cmd
);
7269 hci_dev_unlock(hdev
);
7274 static void remove_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7277 struct mgmt_pending_cmd
*cmd
;
7278 struct mgmt_cp_remove_advertising
*cp
;
7279 struct mgmt_rp_remove_advertising rp
;
7281 BT_DBG("status %d", status
);
7285 /* A failure status here only means that we failed to disable
7286 * advertising. Otherwise, the advertising instance has been removed,
7287 * so report success.
7289 cmd
= pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
);
7294 rp
.instance
= cp
->instance
;
7296 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, MGMT_STATUS_SUCCESS
,
7298 mgmt_pending_remove(cmd
);
7301 hci_dev_unlock(hdev
);
7304 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7305 void *data
, u16 data_len
)
7307 struct mgmt_cp_remove_advertising
*cp
= data
;
7308 struct mgmt_rp_remove_advertising rp
;
7309 struct mgmt_pending_cmd
*cmd
;
7310 struct hci_request req
;
7313 BT_DBG("%s", hdev
->name
);
7317 if (cp
->instance
&& !hci_find_adv_instance(hdev
, cp
->instance
)) {
7318 err
= mgmt_cmd_status(sk
, hdev
->id
,
7319 MGMT_OP_REMOVE_ADVERTISING
,
7320 MGMT_STATUS_INVALID_PARAMS
);
7324 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7325 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7326 pending_find(MGMT_OP_SET_LE
, hdev
)) {
7327 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7332 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
)) {
7333 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7334 MGMT_STATUS_INVALID_PARAMS
);
7338 hci_req_init(&req
, hdev
);
7340 clear_adv_instance(hdev
, &req
, cp
->instance
, true);
7342 if (list_empty(&hdev
->adv_instances
))
7343 disable_advertising(&req
);
7345 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7346 * flag is set or the device isn't powered then we have no HCI
7347 * communication to make. Simply return.
7349 if (skb_queue_empty(&req
.cmd_q
) ||
7350 !hdev_is_powered(hdev
) ||
7351 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
7352 rp
.instance
= cp
->instance
;
7353 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7354 MGMT_OP_REMOVE_ADVERTISING
,
7355 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7359 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
7366 err
= hci_req_run(&req
, remove_advertising_complete
);
7368 mgmt_pending_remove(cmd
);
7371 hci_dev_unlock(hdev
);
static const struct hci_mgmt_handler mgmt_handlers[] = {
        { NULL }, /* 0x0000 (no command) */
        { read_version,            MGMT_READ_VERSION_SIZE,
                                                HCI_MGMT_NO_HDEV |
                                                HCI_MGMT_UNTRUSTED },
        { read_commands,           MGMT_READ_COMMANDS_SIZE,
                                                HCI_MGMT_NO_HDEV |
                                                HCI_MGMT_UNTRUSTED },
        { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
                                                HCI_MGMT_NO_HDEV |
                                                HCI_MGMT_UNTRUSTED },
        { read_controller_info,    MGMT_READ_INFO_SIZE,
                                                HCI_MGMT_UNTRUSTED },
        { set_powered,             MGMT_SETTING_SIZE },
        { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
        { set_connectable,         MGMT_SETTING_SIZE },
        { set_fast_connectable,    MGMT_SETTING_SIZE },
        { set_bondable,            MGMT_SETTING_SIZE },
        { set_link_security,       MGMT_SETTING_SIZE },
        { set_ssp,                 MGMT_SETTING_SIZE },
        { set_hs,                  MGMT_SETTING_SIZE },
        { set_le,                  MGMT_SETTING_SIZE },
        { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
        { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
        { add_uuid,                MGMT_ADD_UUID_SIZE },
        { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
        { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { disconnect,              MGMT_DISCONNECT_SIZE },
        { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
        { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
        { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
        { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
        { pair_device,             MGMT_PAIR_DEVICE_SIZE },
        { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
        { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
        { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
        { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
        { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
        { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
        { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
        { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
        { start_discovery,         MGMT_START_DISCOVERY_SIZE },
        { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
        { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
        { block_device,            MGMT_BLOCK_DEVICE_SIZE },
        { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
        { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
        { set_advertising,         MGMT_SETTING_SIZE },
        { set_bredr,               MGMT_SETTING_SIZE },
        { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
        { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
        { set_secure_conn,         MGMT_SETTING_SIZE },
        { set_debug_keys,          MGMT_SETTING_SIZE },
        { set_privacy,             MGMT_SET_PRIVACY_SIZE },
        { load_irks,               MGMT_LOAD_IRKS_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
        { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
        { add_device,              MGMT_ADD_DEVICE_SIZE },
        { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
        { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
                                                HCI_MGMT_NO_HDEV |
                                                HCI_MGMT_UNTRUSTED },
        { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
                                                HCI_MGMT_UNCONFIGURED |
                                                HCI_MGMT_UNTRUSTED },
        { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
                                                HCI_MGMT_UNCONFIGURED },
        { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
                                                HCI_MGMT_UNCONFIGURED },
        { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
        { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
                                                HCI_MGMT_NO_HDEV |
                                                HCI_MGMT_UNTRUSTED },
        { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
        { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
                                                HCI_MGMT_VAR_LEN },
        { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
};
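
/* Controller index events: depending on whether the controller is
 * still unconfigured, either the Unconfigured Index or the regular
 * Index event is sent, followed by the Extended Index event for
 * sockets that subscribed to it.
 */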
void mgmt_index_added(struct hci_dev *hdev)
{
        struct mgmt_ev_ext_index ev;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                        mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
                                         NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
                        ev.type = 0x01;
                } else {
                        mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
                                         HCI_MGMT_INDEX_EVENTS);
                        ev.type = 0x00;
                }
                break;
        case HCI_AMP:
                ev.type = 0x02;
                break;
        default:
                return;
        }

        ev.bus = hdev->bus;

        mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
                         HCI_MGMT_EXT_INDEX_EVENTS);
}
void mgmt_index_removed(struct hci_dev *hdev)
{
        struct mgmt_ev_ext_index ev;
        u8 status = MGMT_STATUS_INVALID_INDEX;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                        mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
                                         NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
                        ev.type = 0x01;
                } else {
                        mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
                                         HCI_MGMT_INDEX_EVENTS);
                        ev.type = 0x00;
                }
                break;
        case HCI_AMP:
                ev.type = 0x02;
                break;
        default:
                return;
        }

        ev.bus = hdev->bus;

        mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
                         HCI_MGMT_EXT_INDEX_EVENTS);
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                /* Needed for AUTO_OFF case where might not "really"
                 * have been powered off.
                 */
                list_del_init(&p->action);

                switch (p->auto_connect) {
                case HCI_AUTO_CONN_DIRECT:
                case HCI_AUTO_CONN_ALWAYS:
                        list_add(&p->action, &hdev->pend_le_conns);
                        break;
                case HCI_AUTO_CONN_REPORT:
                        list_add(&p->action, &hdev->pend_le_reports);
                        break;
                default:
                        break;
                }
        }

        __hci_update_background_scan(req);
}
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        struct cmd_lookup match = { NULL, hdev };

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        /* Register the available SMP channels (BR/EDR and LE) only
         * when successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly
         * decide if the public address or static address is used.
         */
        smp_register(hdev);

        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

        new_settings(hdev, match.sk);

        hci_dev_unlock(hdev);

        if (match.sk)
                sock_put(match.sk);
}
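
/* Build and run a single HCI request that brings the controller's host
 * state (SSP, LE host support, advertising data, link security and
 * page scan) in line with the mgmt settings after powering on.
 */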
static int powered_update_hci(struct hci_dev *hdev)
{
        struct hci_request req;
        struct adv_info *adv_instance;
        u8 link_sec;

        hci_req_init(&req, hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (lmp_le_capable(hdev)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
                    (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                     !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
                        update_adv_data(&req);
                        update_scan_rsp_data(&req);
                }

                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
                    hdev->cur_adv_instance == 0x00 &&
                    !list_empty(&hdev->adv_instances)) {
                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        hdev->cur_adv_instance = adv_instance->instance;
                }

                if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                        enable_advertising(&req);
                else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
                         hdev->cur_adv_instance)
                        schedule_adv_instance(&req, hdev->cur_adv_instance,
                                              true);

                restart_le_actions(&req);
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        write_fast_connectable(&req, true);
                else
                        write_fast_connectable(&req, false);
                __hci_update_page_scan(&req);
                update_class(&req);
                update_name(&req);
                update_eir(&req);
        }

        return hci_req_run(&req, powered_complete);
}
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
        struct cmd_lookup match = { NULL, hdev };
        u8 status, zero_cod[] = { 0, 0, 0 };
        int err;

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return 0;

        if (powered) {
                if (powered_update_hci(hdev) == 0)
                        return 0;

                mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
                                     &match);
                goto new_settings;
        }

        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

        /* If the power off is because of hdev unregistration let
         * use the appropriate INVALID_INDEX status. Otherwise use
         * NOT_POWERED. We cover both scenarios here since later in
         * mgmt_index_removed() any hci_conn callbacks will have already
         * been triggered, potentially causing misleading DISCONNECTED
         * status responses.
         */
        if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
                status = MGMT_STATUS_INVALID_INDEX;
        else
                status = MGMT_STATUS_NOT_POWERED;

        mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

        if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
                mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
                                   zero_cod, sizeof(zero_cod), NULL);

new_settings:
        err = new_settings(hdev, match.sk);

        if (match.sk)
                sock_put(match.sk);

        return err;
}
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
        struct mgmt_pending_cmd *cmd;
        u8 status;

        cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
        if (!cmd)
                return;

        if (err == -ERFKILL)
                status = MGMT_STATUS_RFKILLED;
        else
                status = MGMT_STATUS_FAILED;

        mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

        mgmt_pending_remove(cmd);
}
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_dev_lock(hdev);

        /* When discoverable timeout triggers, then just make sure
         * the limited discoverable flag is cleared. Even in the case
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

        hci_req_init(&req, hdev);
        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                u8 scan = SCAN_PAGE;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
                            sizeof(scan), &scan);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                update_adv_data(&req);

        hci_req_run(&req, NULL);

        hdev->discov_timeout = 0;

        new_settings(hdev, NULL);

        hci_dev_unlock(hdev);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
                       bool persistent)
{
        struct mgmt_ev_new_link_key ev;

        memset(&ev, 0, sizeof(ev));

        ev.store_hint = persistent;
        bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
        ev.key.addr.type = BDADDR_BREDR;
        ev.key.type = key->type;
        memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
        ev.key.pin_len = key->pin_len;

        mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
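
/* Map an SMP long term key to the type values used by the New Long
 * Term Key event, distinguishing legacy and P-256 keys as well as
 * authenticated, unauthenticated and debug keys.
 */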
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
        switch (ltk->type) {
        case SMP_LTK:
        case SMP_LTK_SLAVE:
                if (ltk->authenticated)
                        return MGMT_LTK_AUTHENTICATED;
                return MGMT_LTK_UNAUTHENTICATED;
        case SMP_LTK_P256:
                if (ltk->authenticated)
                        return MGMT_LTK_P256_AUTH;
                return MGMT_LTK_P256_UNAUTH;
        case SMP_LTK_P256_DEBUG:
                return MGMT_LTK_P256_DEBUG;
        }

        return MGMT_LTK_UNAUTHENTICATED;
}
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
        struct mgmt_ev_new_long_term_key ev;

        memset(&ev, 0, sizeof(ev));

        /* Devices using resolvable or non-resolvable random addresses
         * without providing an identity resolving key don't require
         * to store long term keys. Their addresses will change the
         * next time around.
         *
         * Only when a remote device provides an identity address
         * make sure the long term key is stored. If the remote
         * identity is known, the long term keys are internally
         * mapped to the identity address. So allow static random
         * and public addresses here.
         */
        if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
            (key->bdaddr.b[5] & 0xc0) != 0xc0)
                ev.store_hint = 0x00;
        else
                ev.store_hint = persistent;

        bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
        ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
        ev.key.type = mgmt_ltk_type(key);
        ev.key.enc_size = key->enc_size;
        ev.key.ediv = key->ediv;
        ev.key.rand = key->rand;

        if (key->type == SMP_LTK)
                ev.key.master = 1;

        /* Make sure we copy only the significant bytes based on the
         * encryption key size, and set the rest of the value to zeroes.
         */
        memcpy(ev.key.val, key->val, key->enc_size);
        memset(ev.key.val + key->enc_size, 0,
               sizeof(ev.key.val) - key->enc_size);

        mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
        struct mgmt_ev_new_irk ev;

        memset(&ev, 0, sizeof(ev));

        ev.store_hint = persistent;

        bacpy(&ev.rpa, &irk->rpa);
        bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
        ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
        memcpy(ev.irk.val, irk->val, sizeof(irk->val));

        mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
                   bool persistent)
{
        struct mgmt_ev_new_csrk ev;

        memset(&ev, 0, sizeof(ev));

        /* Devices using resolvable or non-resolvable random addresses
         * without providing an identity resolving key don't require
         * to store signature resolving keys. Their addresses will change
         * the next time around.
         *
         * Only when a remote device provides an identity address
         * make sure the signature resolving key is stored. So allow
         * static random and public addresses here.
         */
        if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
            (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
                ev.store_hint = 0x00;
        else
                ev.store_hint = persistent;

        bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
        ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
        ev.key.type = csrk->type;
        memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

        mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
                         u8 bdaddr_type, u8 store_hint, u16 min_interval,
                         u16 max_interval, u16 latency, u16 timeout)
{
        struct mgmt_ev_new_conn_param ev;

        if (!hci_is_identity_address(bdaddr, bdaddr_type))
                return;

        memset(&ev, 0, sizeof(ev));
        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
        ev.store_hint = store_hint;
        ev.min_interval = cpu_to_le16(min_interval);
        ev.max_interval = cpu_to_le16(max_interval);
        ev.latency = cpu_to_le16(latency);
        ev.timeout = cpu_to_le16(timeout);

        mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
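
/* The Device Connected event carries the remote device's EIR or
 * advertising data so that user space gets the name and class of
 * device together with the connection notification.
 */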
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
                           u32 flags, u8 *name, u8 name_len)
{
        char buf[512];
        struct mgmt_ev_device_connected *ev = (void *) buf;
        u16 eir_len = 0;

        bacpy(&ev->addr.bdaddr, &conn->dst);
        ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

        ev->flags = __cpu_to_le32(flags);

        /* We must ensure that the EIR Data fields are ordered and
         * unique. Keep it simple for now and avoid the problem by not
         * adding any BR/EDR data to the LE adv.
         */
        if (conn->le_adv_data_len > 0) {
                memcpy(&ev->eir[eir_len],
                       conn->le_adv_data, conn->le_adv_data_len);
                eir_len = conn->le_adv_data_len;
        } else {
                if (name_len > 0)
                        eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
                                                  name, name_len);

                if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
                        eir_len = eir_append_data(ev->eir, eir_len,
                                                  EIR_CLASS_OF_DEV,
                                                  conn->dev_class, 3);
        }

        ev->eir_len = cpu_to_le16(eir_len);

        mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
                   sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
        struct sock **sk = data;

        cmd->cmd_complete(cmd, 0);

        *sk = cmd->sk;
        sock_hold(*sk);

        mgmt_pending_remove(cmd);
}
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
        struct hci_dev *hdev = data;
        struct mgmt_cp_unpair_device *cp = cmd->param;

        device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

        cmd->cmd_complete(cmd, 0);
        mgmt_pending_remove(cmd);
}
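
/* Returns true if a pending Set Powered command is about to turn the
 * controller off, i.e. we are in the middle of powering down.
 */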
bool mgmt_powering_down(struct hci_dev *hdev)
{
        struct mgmt_pending_cmd *cmd;
        struct mgmt_mode *cp;

        cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
        if (!cmd)
                return false;

        cp = cmd->param;
        if (!cp->val)
                return true;

        return false;
}
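
/* When the last remaining connection goes away while powering down,
 * the deferred power off work is queued right away before the Device
 * Disconnected event is sent out.
 */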
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type, u8 reason,
                              bool mgmt_connected)
{
        struct mgmt_ev_device_disconnected ev;
        struct sock *sk = NULL;

        /* The connection is still in hci_conn_hash so test for 1
         * instead of 0 to know if this is the last one.
         */
        if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
                cancel_delayed_work(&hdev->power_off);
                queue_work(hdev->req_workqueue, &hdev->power_off.work);
        }

        if (!mgmt_connected)
                return;

        if (link_type != ACL_LINK && link_type != LE_LINK)
                return;

        mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.reason = reason;

        mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

        if (sk)
                sock_put(sk);

        mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
                             hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 link_type, u8 addr_type, u8 status)
{
        u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
        struct mgmt_cp_disconnect *cp;
        struct mgmt_pending_cmd *cmd;

        mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
                             hdev);

        cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
        if (!cmd)
                return;

        cp = cmd->param;

        if (bacmp(bdaddr, &cp->addr.bdaddr))
                return;

        if (cp->addr.type != bdaddr_type)
                return;

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                         u8 addr_type, u8 status)
{
        struct mgmt_ev_connect_failed ev;

        /* The connection is still in hci_conn_hash so test for 1
         * instead of 0 to know if this is the last one.
         */
        if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
                cancel_delayed_work(&hdev->power_off);
                queue_work(hdev->req_workqueue, &hdev->power_off.work);
        }

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.status = mgmt_status(status);

        mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
        struct mgmt_ev_pin_code_request ev;

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = BDADDR_BREDR;
        ev.secure = secure;

        mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                  u8 status)
{
        struct mgmt_pending_cmd *cmd;

        cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
        if (!cmd)
                return;

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);
}
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                      u8 status)
{
        struct mgmt_pending_cmd *cmd;

        cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
        if (!cmd)
                return;

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type, u32 value,
                              u8 confirm_hint)
{
        struct mgmt_ev_user_confirm_request ev;

        BT_DBG("%s", hdev->name);

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.confirm_hint = confirm_hint;
        ev.value = cpu_to_le32(value);

        return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
                          NULL);
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type)
{
        struct mgmt_ev_user_passkey_request ev;

        BT_DBG("%s", hdev->name);

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);

        return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
                          NULL);
}
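
/* Common completion helper for the user confirm/passkey reply variants
 * below: look up the pending command for the given opcode, complete it
 * with the translated status and drop it.
 */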
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                      u8 link_type, u8 addr_type, u8 status,
                                      u8 opcode)
{
        struct mgmt_pending_cmd *cmd;

        cmd = pending_find(opcode, hdev);
        if (!cmd)
                return -ENOENT;

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);

        return 0;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status)
{
        return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
                                          status, MGMT_OP_USER_CONFIRM_REPLY);
}
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 link_type, u8 addr_type, u8 status)
{
        return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
                                          status,
                                          MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status)
{
        return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
                                          status, MGMT_OP_USER_PASSKEY_REPLY);
}
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 link_type, u8 addr_type, u8 status)
{
        return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
                                          status,
                                          MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 link_type, u8 addr_type, u32 passkey,
                             u8 entered)
{
        struct mgmt_ev_passkey_notify ev;

        BT_DBG("%s", hdev->name);

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.passkey = __cpu_to_le32(passkey);
        ev.entered = entered;

        return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
        struct mgmt_ev_auth_failed ev;
        struct mgmt_pending_cmd *cmd;
        u8 status = mgmt_status(hci_status);

        bacpy(&ev.addr.bdaddr, &conn->dst);
        ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
        ev.status = status;

        cmd = find_pairing(conn);

        mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
                   cmd ? cmd->sk : NULL);

        if (cmd) {
                cmd->cmd_complete(cmd, status);
                mgmt_pending_remove(cmd);
        }
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
        struct cmd_lookup match = { NULL, hdev };
        bool changed;

        if (status) {
                u8 mgmt_err = mgmt_status(status);
                mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
                                     cmd_status_rsp, &mgmt_err);
                return;
        }

        if (test_bit(HCI_AUTH, &hdev->flags))
                changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
        else
                changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

        mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
                             &match);

        if (changed)
                new_settings(hdev, match.sk);

        if (match.sk)
                sock_put(match.sk);
}
static void clear_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!lmp_ext_inq_capable(hdev))
                return;

        memset(hdev->eir, 0, sizeof(hdev->eir));

        memset(&cp, 0, sizeof(cp));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
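
/* Invoked once the controller has processed an SSP mode change:
 * update the SSP (and, on failure or disable, High Speed) flags,
 * answer pending Set SSP commands and refresh or clear the EIR data.
 */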
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
        struct cmd_lookup match = { NULL, hdev };
        struct hci_request req;
        bool changed = false;

        if (status) {
                u8 mgmt_err = mgmt_status(status);

                if (enable && hci_dev_test_and_clear_flag(hdev,
                                                          HCI_SSP_ENABLED)) {
                        hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
                        new_settings(hdev, NULL);
                }

                mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
                                     &mgmt_err);
                return;
        }

        if (enable) {
                changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
        } else {
                changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
                if (!changed)
                        changed = hci_dev_test_and_clear_flag(hdev,
                                                              HCI_HS_ENABLED);
                else
                        hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
        }

        mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

        if (changed)
                new_settings(hdev, match.sk);

        if (match.sk)
                sock_put(match.sk);

        hci_req_init(&req, hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
                        hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
                                    sizeof(enable), &enable);
                update_eir(&req);
        } else {
                clear_eir(&req);
        }

        hci_req_run(&req, NULL);
}
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
        struct cmd_lookup *match = data;

        if (match->sk == NULL) {
                match->sk = cmd->sk;
                sock_hold(match->sk);
        }
}
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status)
{
        struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

        mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
        mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
        mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

        if (!status)
                mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
                                   dev_class, 3, NULL);

        if (match.sk)
                sock_put(match.sk);
}
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
        struct mgmt_cp_set_local_name ev;
        struct mgmt_pending_cmd *cmd;

        if (status)
                return;

        memset(&ev, 0, sizeof(ev));
        memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
        memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

        cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
        if (!cmd) {
                memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

                /* If this is a HCI command related to powering on the
                 * HCI dev don't send any mgmt signals.
                 */
                if (pending_find(MGMT_OP_SET_POWERED, hdev))
                        return;
        }

        mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
                           cmd ? cmd->sk : NULL);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
        int i;

        for (i = 0; i < uuid_count; i++) {
                if (!memcmp(uuid, uuids[i], 16))
                        return true;
        }

        return false;
}
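
/* Scan EIR/advertising data for Service UUID fields. 16-bit and 32-bit
 * UUIDs are expanded against the Bluetooth base UUID before they are
 * compared with the 128-bit UUIDs of the discovery filter.
 */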
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
        u16 parsed = 0;

        while (parsed < eir_len) {
                u8 field_len = eir[0];
                u8 uuid[16];
                int i;

                if (field_len == 0)
                        break;

                if (eir_len - parsed < field_len + 1)
                        break;

                switch (eir[1]) {
                case EIR_UUID16_ALL:
                case EIR_UUID16_SOME:
                        for (i = 0; i + 3 <= field_len; i += 2) {
                                memcpy(uuid, bluetooth_base_uuid, 16);
                                uuid[13] = eir[i + 3];
                                uuid[12] = eir[i + 2];
                                if (has_uuid(uuid, uuid_count, uuids))
                                        return true;
                        }
                        break;
                case EIR_UUID32_ALL:
                case EIR_UUID32_SOME:
                        for (i = 0; i + 5 <= field_len; i += 4) {
                                memcpy(uuid, bluetooth_base_uuid, 16);
                                uuid[15] = eir[i + 5];
                                uuid[14] = eir[i + 4];
                                uuid[13] = eir[i + 3];
                                uuid[12] = eir[i + 2];
                                if (has_uuid(uuid, uuid_count, uuids))
                                        return true;
                        }
                        break;
                case EIR_UUID128_ALL:
                case EIR_UUID128_SOME:
                        for (i = 0; i + 17 <= field_len; i += 16) {
                                memcpy(uuid, eir + i + 2, 16);
                                if (has_uuid(uuid, uuid_count, uuids))
                                        return true;
                        }
                        break;
                }

                parsed += field_len + 1;
                eir += field_len + 1;
        }

        return false;
}
static void restart_le_scan(struct hci_dev *hdev)
{
        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
                       hdev->discovery.scan_start +
                       hdev->discovery.scan_duration))
                return;

        queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
                           DISCOV_LE_RESTART_DELAY);
}
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
                            u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
        /* If a RSSI threshold has been specified, and
         * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
         * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
         * is set, let it through for further processing, as we might need to
         * restart the scan.
         *
         * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
         * the results are also dropped.
         */
        if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
            (rssi == HCI_RSSI_INVALID ||
            (rssi < hdev->discovery.rssi &&
             !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
                return false;

        if (hdev->discovery.uuid_count != 0) {
                /* If a list of UUIDs is provided in filter, results with no
                 * matching UUID should be dropped.
                 */
                if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
                                   hdev->discovery.uuids) &&
                    !eir_has_uuids(scan_rsp, scan_rsp_len,
                                   hdev->discovery.uuid_count,
                                   hdev->discovery.uuids))
                        return false;
        }

        /* If duplicate filtering does not report RSSI changes, then restart
         * scanning to ensure updated result with updated RSSI values.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
                restart_le_scan(hdev);

                /* Validate RSSI value against the RSSI threshold once more. */
                if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
                    rssi < hdev->discovery.rssi)
                        return false;
        }

        return true;
}
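
/* Translate an inquiry result or advertising report into a Device
 * Found event, subject to the current discovery state and any service
 * discovery filter that has been configured for it.
 */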
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
        char buf[512];
        struct mgmt_ev_device_found *ev = (void *)buf;
        size_t ev_size;

        /* Don't send events for a non-kernel initiated discovery. With
         * LE one exception is if we have pend_le_reports > 0 in which
         * case we're doing passive scanning and want these events.
         */
        if (!hci_discovery_active(hdev)) {
                if (link_type == ACL_LINK)
                        return;
                if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
                        return;
        }

        if (hdev->discovery.result_filtering) {
                /* We are using service discovery */
                if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
                                     scan_rsp_len))
                        return;
        }

        /* Make sure that the buffer is big enough. The 5 extra bytes
         * are for the potential CoD field.
         */
        if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
                return;

        memset(buf, 0, sizeof(buf));

        /* In case of device discovery with BR/EDR devices (pre 1.2), the
         * RSSI value was reported as 0 when not available. This behavior
         * is kept when using device discovery. This is required for full
         * backwards compatibility with the API.
         *
         * However when using service discovery, the value 127 will be
         * returned when the RSSI is not available.
         */
        if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
            link_type == ACL_LINK)
                rssi = 0;

        bacpy(&ev->addr.bdaddr, bdaddr);
        ev->addr.type = link_to_bdaddr(link_type, addr_type);
        ev->rssi = rssi;
        ev->flags = cpu_to_le32(flags);

        if (eir_len > 0)
                /* Copy EIR or advertising data into event */
                memcpy(ev->eir, eir, eir_len);

        if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
                eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
                                          dev_class, 3);

        if (scan_rsp_len > 0)
                /* Append scan response data to event */
                memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

        ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
        ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

        mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
        struct mgmt_ev_device_found *ev;
        char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
        u16 eir_len;

        ev = (struct mgmt_ev_device_found *) buf;

        memset(buf, 0, sizeof(buf));

        bacpy(&ev->addr.bdaddr, bdaddr);
        ev->addr.type = link_to_bdaddr(link_type, addr_type);
        ev->rssi = rssi;

        eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
                                  name_len);

        ev->eir_len = cpu_to_le16(eir_len);

        mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
        struct mgmt_ev_discovering ev;

        BT_DBG("%s discovering %u", hdev->name, discovering);

        memset(&ev, 0, sizeof(ev));
        ev.type = hdev->discovery.type;
        ev.discovering = discovering;

        mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 instance;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
                return;

        instance = get_current_adv_instance(hdev);

        hci_req_init(&req, hdev);

        if (instance) {
                schedule_adv_instance(&req, instance, true);
        } else {
                update_adv_data(&req);
                update_scan_rsp_data(&req);
                enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}
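
/* Registration of the management channel: binds the handler table
 * above to HCI_CHANNEL_CONTROL so that hci_sock can route control
 * messages from user space to this file.
 */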
static struct hci_mgmt_chan chan = {
        .channel        = HCI_CHANNEL_CONTROL,
        .handler_count  = ARRAY_SIZE(mgmt_handlers),
        .handlers       = mgmt_handlers,
        .hdev_init      = mgmt_init_hdev,
};

int mgmt_init(void)
{
        return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
        hci_mgmt_chan_unregister(&chan);
}