/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "mgmt_util.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	10
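
/* Tables of the opcodes and events exposed over the management
 * interface. read_commands() reports these lists to userspace; the
 * *_untrusted_* variants are the subset visible to untrusted sockets.
 */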
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
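
/* Convert an HCI status code to its MGMT equivalent. Any HCI status
 * outside the table above falls back to MGMT_STATUS_FAILED.
 */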
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}
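
/* Thin wrappers around mgmt_send_event() on the control channel. They
 * differ only in the socket flag passed down: mgmt_index_event() and
 * mgmt_limited_event() forward a caller-supplied flag,
 * mgmt_generic_event() uses HCI_MGMT_GENERIC_EVENTS and mgmt_event()
 * uses HCI_SOCK_TRUSTED.
 */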
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size, i;
	int err;

	BT_DBG("sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
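
/* The index list handlers below share the same two-pass pattern: first
 * count the matching controllers under hci_dev_list_lock, then allocate
 * the response and fill in the indexes, skipping controllers that are
 * still in setup/config or bound to a user channel.
 */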
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
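
/* Configuration helpers: is_configured() tells whether a controller
 * still needs external configuration or a valid public address, and
 * get_missing_options() encodes the same checks as a bitmask of
 * MGMT_OPTION_* flags used for the New Configuration Options event and
 * the Read Controller Configuration response.
 */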
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
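
/* Settings helpers: get_supported_settings() derives the settings a
 * controller can support from its LMP features, while
 * get_current_settings() reports which of them are currently enabled
 * based on the hdev flags.
 */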
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
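
/* EIR helpers: the create_uuid*_list() functions append 16-bit, 32-bit
 * and 128-bit Service UUID fields to an EIR buffer, marking the list as
 * incomplete (EIR_UUID*_SOME) when the remaining space runs out.
 */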
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}
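
/* Scan response data is built either from the current advertising
 * instance (create_instance_scan_rsp_data) or, for instance 0, from the
 * local device name (create_default_scan_rsp_data).
 */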
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}
static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
static void update_scan_rsp_data(struct hci_request *req)
{
	update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
}
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
static bool get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
static void update_adv_data(struct hci_request *req)
{
	update_inst_adv_data(req, get_current_adv_instance(req->hdev));
}

int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
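
/* enable_advertising() programs the advertising parameters and turns
 * advertising on. The advertising type is chosen from the connectable
 * state (ADV_IND), the presence of scan response data (ADV_SCAN_IND) or
 * neither (ADV_NONCONN_IND).
 */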
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
				u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
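
/* schedule_adv_instance() arms the expiry timer for an advertising
 * instance and, unless the same instance is already being advertised,
 * refreshes the advertising and scan response data before enabling
 * advertising for it.
 */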
static int schedule_adv_instance(struct hci_request *req, u8 instance,
				 bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	update_adv_data(req);
	update_scan_rsp_data(req);
	enable_advertising(req);

	return 0;
}
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
			       u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		schedule_adv_instance(req, next_instance->instance, false);
}
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	clear_adv_instance(hdev, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
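
/* Set Powered command handler. Powering on is deferred to the power_on
 * work item, while powering off first cleans up scanning, advertising
 * and open connections via clean_up_hci_state().
 */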
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), skip);
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}
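
/* Set Discoverable: set_discoverable() validates the request and issues
 * the IAC and scan enable commands (or just updates advertising data on
 * LE-only controllers); set_discoverable_complete() commits the flag
 * changes and arms the discoverable timeout once the HCI request
 * completes.
 */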
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
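/* For reference: HCI_OP_WRITE_SSP_MODE takes a single mode byte, so the
 * management value can be handed to the controller unchanged; when SSP is
 * being switched off, SSP debug mode is turned off first so that no debug
 * keys survive the transition.
 */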
2443 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2445 struct mgmt_mode
*cp
= data
;
2450 BT_DBG("request for %s", hdev
->name
);
2452 status
= mgmt_bredr_support(hdev
);
2454 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
2456 if (!lmp_ssp_capable(hdev
))
2457 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2458 MGMT_STATUS_NOT_SUPPORTED
);
2460 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
2461 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2462 MGMT_STATUS_REJECTED
);
2464 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2465 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2466 MGMT_STATUS_INVALID_PARAMS
);
2470 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
2471 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2477 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_HS_ENABLED
);
2479 if (hdev_is_powered(hdev
)) {
2480 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2481 MGMT_STATUS_REJECTED
);
2485 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_HS_ENABLED
);
2488 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
2493 err
= new_settings(hdev
, sk
);
2496 hci_dev_unlock(hdev
);
2500 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2502 struct cmd_lookup match
= { NULL
, hdev
};
2507 u8 mgmt_err
= mgmt_status(status
);
2509 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2514 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2516 new_settings(hdev
, match
.sk
);
2521 /* Make sure the controller has a good default for
2522 * advertising data. Restrict the update to when LE
2523 * has actually been enabled. During power on, the
2524 * update in powered_update_hci will take care of it.
2526 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2527 struct hci_request req
;
2529 hci_req_init(&req
, hdev
);
2530 update_adv_data(&req
);
2531 update_scan_rsp_data(&req
);
2532 __hci_update_background_scan(&req
);
2533 hci_req_run(&req
, NULL
);
2537 hci_dev_unlock(hdev
);
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!val)
		clear_adv_instance(hdev, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
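/* For reference: the Write LE Host Supported command carries two bytes,
 * "le" and "simul". Simultaneous LE and BR/EDR is never requested here,
 * so simul stays 0x00 and only the le byte follows the requested setting.
 */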
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
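/* Example: UUIDs built on the Bluetooth Base UUID
 * (xxxxxxxx-0000-1000-8000-00805f9b34fb) are shortened forms, so
 * 0000180d-0000-1000-8000-00805f9b34fb is treated as the 16-bit UUID
 * 0x180d, while anything outside that range keeps its full 128-bit size.
 */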
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2712 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2714 struct mgmt_cp_add_uuid
*cp
= data
;
2715 struct mgmt_pending_cmd
*cmd
;
2716 struct hci_request req
;
2717 struct bt_uuid
*uuid
;
2720 BT_DBG("request for %s", hdev
->name
);
2724 if (pending_eir_or_class(hdev
)) {
2725 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2730 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2736 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2737 uuid
->svc_hint
= cp
->svc_hint
;
2738 uuid
->size
= get_uuid_size(cp
->uuid
);
2740 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2742 hci_req_init(&req
, hdev
);
2747 err
= hci_req_run(&req
, add_uuid_complete
);
2749 if (err
!= -ENODATA
)
2752 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2753 hdev
->dev_class
, 3);
2757 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2766 hci_dev_unlock(hdev
);
2770 static bool enable_service_cache(struct hci_dev
*hdev
)
2772 if (!hdev_is_powered(hdev
))
2775 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
2776 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2784 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2786 BT_DBG("status 0x%02x", status
);
2788 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2791 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2794 struct mgmt_cp_remove_uuid
*cp
= data
;
2795 struct mgmt_pending_cmd
*cmd
;
2796 struct bt_uuid
*match
, *tmp
;
2797 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2798 struct hci_request req
;
2801 BT_DBG("request for %s", hdev
->name
);
2805 if (pending_eir_or_class(hdev
)) {
2806 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2811 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2812 hci_uuids_clear(hdev
);
2814 if (enable_service_cache(hdev
)) {
2815 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2816 MGMT_OP_REMOVE_UUID
,
2817 0, hdev
->dev_class
, 3);
2826 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2827 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2830 list_del(&match
->list
);
2836 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2837 MGMT_STATUS_INVALID_PARAMS
);
2842 hci_req_init(&req
, hdev
);
2847 err
= hci_req_run(&req
, remove_uuid_complete
);
2849 if (err
!= -ENODATA
)
2852 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2853 hdev
->dev_class
, 3);
2857 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2866 hci_dev_unlock(hdev
);
2870 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2872 BT_DBG("status 0x%02x", status
);
2874 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2877 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2880 struct mgmt_cp_set_dev_class
*cp
= data
;
2881 struct mgmt_pending_cmd
*cmd
;
2882 struct hci_request req
;
2885 BT_DBG("request for %s", hdev
->name
);
2887 if (!lmp_bredr_capable(hdev
))
2888 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2889 MGMT_STATUS_NOT_SUPPORTED
);
2893 if (pending_eir_or_class(hdev
)) {
2894 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2899 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2900 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2901 MGMT_STATUS_INVALID_PARAMS
);
2905 hdev
->major_class
= cp
->major
;
2906 hdev
->minor_class
= cp
->minor
;
2908 if (!hdev_is_powered(hdev
)) {
2909 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2910 hdev
->dev_class
, 3);
2914 hci_req_init(&req
, hdev
);
2916 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2917 hci_dev_unlock(hdev
);
2918 cancel_delayed_work_sync(&hdev
->service_cache
);
2925 err
= hci_req_run(&req
, set_class_complete
);
2927 if (err
!= -ENODATA
)
2930 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2931 hdev
->dev_class
, 3);
2935 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2944 hci_dev_unlock(hdev
);
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
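/* Rough size check example (assuming the usual 25-byte wire format of
 * struct mgmt_link_key_info: 7 bytes of address info, 1 byte key type,
 * 16 byte value, 1 byte pin_len): a Load Link Keys command carrying two
 * keys has to be exactly sizeof(*cp) + 2 * 25 = 53 bytes long, anything
 * else is rejected as Invalid Parameters.
 */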
3030 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3031 u8 addr_type
, struct sock
*skip_sk
)
3033 struct mgmt_ev_device_unpaired ev
;
3035 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
3036 ev
.addr
.type
= addr_type
;
3038 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
3042 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3045 struct mgmt_cp_unpair_device
*cp
= data
;
3046 struct mgmt_rp_unpair_device rp
;
3047 struct hci_cp_disconnect dc
;
3048 struct mgmt_pending_cmd
*cmd
;
3049 struct hci_conn
*conn
;
3052 memset(&rp
, 0, sizeof(rp
));
3053 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3054 rp
.addr
.type
= cp
->addr
.type
;
3056 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3057 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3058 MGMT_STATUS_INVALID_PARAMS
,
3061 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
3062 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3063 MGMT_STATUS_INVALID_PARAMS
,
3068 if (!hdev_is_powered(hdev
)) {
3069 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3070 MGMT_STATUS_NOT_POWERED
, &rp
,
3075 if (cp
->addr
.type
== BDADDR_BREDR
) {
3076 /* If disconnection is requested, then look up the
3077 * connection. If the remote device is connected, it
3078 * will be later used to terminate the link.
3080 * Setting it to NULL explicitly will cause no
3081 * termination of the link.
3084 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3089 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
3093 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
3096 /* Defer clearing up the connection parameters
3097 * until closing to give a chance of keeping
3098 * them if a repairing happens.
3100 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3102 /* If disconnection is not requested, then
3103 * clear the connection variable so that the
3104 * link is not terminated.
3106 if (!cp
->disconnect
)
3110 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
3111 addr_type
= ADDR_LE_DEV_PUBLIC
;
3113 addr_type
= ADDR_LE_DEV_RANDOM
;
3115 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3117 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3121 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3122 MGMT_STATUS_NOT_PAIRED
, &rp
,
3127 /* If the connection variable is set, then termination of the
3128 * link is requested.
3131 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
3133 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
3137 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
3144 cmd
->cmd_complete
= addr_cmd_complete
;
3146 dc
.handle
= cpu_to_le16(conn
->handle
);
3147 dc
.reason
= 0x13; /* Remote User Terminated Connection */
3148 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
3150 mgmt_pending_remove(cmd
);
3153 hci_dev_unlock(hdev
);
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
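/* Example: a connection over LE with a public identity address is
 * reported back to user space as BDADDR_LE_PUBLIC (0x01), a random one
 * as BDADDR_LE_RANDOM (0x02), and anything carried over ACL stays
 * BDADDR_BREDR (0x00).
 */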
3241 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3244 struct mgmt_rp_get_connections
*rp
;
3254 if (!hdev_is_powered(hdev
)) {
3255 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
3256 MGMT_STATUS_NOT_POWERED
);
3261 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3262 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3266 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
3267 rp
= kmalloc(rp_len
, GFP_KERNEL
);
3274 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3275 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3277 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
3278 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
3279 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
3284 rp
->conn_count
= cpu_to_le16(i
);
3286 /* Recalculate length in case of filtered SCO connections, etc */
3287 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
3289 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
3295 hci_dev_unlock(hdev
);
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
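/* Note: with a pending security level of BT_SECURITY_HIGH only a full
 * 16 digit PIN is acceptable, so shorter PINs are answered with a
 * negative reply toward the controller while user space still gets an
 * Invalid Parameters status for its request.
 */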
3380 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3383 struct mgmt_cp_set_io_capability
*cp
= data
;
3387 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
3388 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
3389 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
3393 hdev
->io_capability
= cp
->io_capability
;
3395 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
3396 hdev
->io_capability
);
3398 hci_dev_unlock(hdev
);
3400 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
3404 static struct mgmt_pending_cmd
*find_pairing(struct hci_conn
*conn
)
3406 struct hci_dev
*hdev
= conn
->hdev
;
3407 struct mgmt_pending_cmd
*cmd
;
3409 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
3410 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
3413 if (cmd
->user_data
!= conn
)
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
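/* Note: the LE success path is reported through mgmt_smp_complete() once
 * SMP has actually finished, which is why le_pairing_complete_cb() only
 * acts on failures; a zero status at connection time is not proof that
 * pairing completed.
 */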
3498 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3501 struct mgmt_cp_pair_device
*cp
= data
;
3502 struct mgmt_rp_pair_device rp
;
3503 struct mgmt_pending_cmd
*cmd
;
3504 u8 sec_level
, auth_type
;
3505 struct hci_conn
*conn
;
3510 memset(&rp
, 0, sizeof(rp
));
3511 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3512 rp
.addr
.type
= cp
->addr
.type
;
3514 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3515 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3516 MGMT_STATUS_INVALID_PARAMS
,
3519 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3520 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3521 MGMT_STATUS_INVALID_PARAMS
,
3526 if (!hdev_is_powered(hdev
)) {
3527 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3528 MGMT_STATUS_NOT_POWERED
, &rp
,
3533 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
3534 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3535 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
3540 sec_level
= BT_SECURITY_MEDIUM
;
3541 auth_type
= HCI_AT_DEDICATED_BONDING
;
3543 if (cp
->addr
.type
== BDADDR_BREDR
) {
3544 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3549 /* Convert from L2CAP channel address type to HCI address type
3551 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
3552 addr_type
= ADDR_LE_DEV_PUBLIC
;
3554 addr_type
= ADDR_LE_DEV_RANDOM
;
3556 /* When pairing a new device, it is expected to remember
3557 * this device for future connections. Adding the connection
3558 * parameter information ahead of time allows tracking
3559 * of the slave preferred values and will speed up any
3560 * further connection establishment.
3562 * If connection parameters already exist, then they
3563 * will be kept and this function does nothing.
3565 hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3567 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, addr_type
,
3568 sec_level
, HCI_LE_CONN_TIMEOUT
,
3575 if (PTR_ERR(conn
) == -EBUSY
)
3576 status
= MGMT_STATUS_BUSY
;
3577 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
3578 status
= MGMT_STATUS_NOT_SUPPORTED
;
3579 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
3580 status
= MGMT_STATUS_REJECTED
;
3582 status
= MGMT_STATUS_CONNECT_FAILED
;
3584 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3585 status
, &rp
, sizeof(rp
));
3589 if (conn
->connect_cfm_cb
) {
3590 hci_conn_drop(conn
);
3591 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3592 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3596 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3599 hci_conn_drop(conn
);
3603 cmd
->cmd_complete
= pairing_complete
;
3605 /* For LE, just connecting isn't a proof that the pairing finished */
3606 if (cp
->addr
.type
== BDADDR_BREDR
) {
3607 conn
->connect_cfm_cb
= pairing_complete_cb
;
3608 conn
->security_cfm_cb
= pairing_complete_cb
;
3609 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3611 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3612 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3613 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3616 conn
->io_capability
= cp
->io_cap
;
3617 cmd
->user_data
= hci_conn_get(conn
);
3619 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3620 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
3621 cmd
->cmd_complete(cmd
, 0);
3622 mgmt_pending_remove(cmd
);
3628 hci_dev_unlock(hdev
);
3632 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3635 struct mgmt_addr_info
*addr
= data
;
3636 struct mgmt_pending_cmd
*cmd
;
3637 struct hci_conn
*conn
;
3644 if (!hdev_is_powered(hdev
)) {
3645 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3646 MGMT_STATUS_NOT_POWERED
);
3650 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3652 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3653 MGMT_STATUS_INVALID_PARAMS
);
3657 conn
= cmd
->user_data
;
3659 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3660 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3661 MGMT_STATUS_INVALID_PARAMS
);
3665 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
3666 mgmt_pending_remove(cmd
);
3668 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3669 addr
, sizeof(*addr
));
3671 hci_dev_unlock(hdev
);
3675 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3676 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3677 u16 hci_op
, __le32 passkey
)
3679 struct mgmt_pending_cmd
*cmd
;
3680 struct hci_conn
*conn
;
3685 if (!hdev_is_powered(hdev
)) {
3686 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3687 MGMT_STATUS_NOT_POWERED
, addr
,
3692 if (addr
->type
== BDADDR_BREDR
)
3693 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3695 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
3698 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3699 MGMT_STATUS_NOT_CONNECTED
, addr
,
3704 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3705 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3707 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3708 MGMT_STATUS_SUCCESS
, addr
,
3711 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3712 MGMT_STATUS_FAILED
, addr
,
3718 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3724 cmd
->cmd_complete
= addr_cmd_complete
;
3726 /* Continue with pairing via HCI */
3727 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3728 struct hci_cp_user_passkey_reply cp
;
3730 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3731 cp
.passkey
= passkey
;
3732 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3734 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3738 mgmt_pending_remove(cmd
);
3741 hci_dev_unlock(hdev
);
3745 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3746 void *data
, u16 len
)
3748 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3752 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3753 MGMT_OP_PIN_CODE_NEG_REPLY
,
3754 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3757 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3760 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3764 if (len
!= sizeof(*cp
))
3765 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3766 MGMT_STATUS_INVALID_PARAMS
);
3768 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3769 MGMT_OP_USER_CONFIRM_REPLY
,
3770 HCI_OP_USER_CONFIRM_REPLY
, 0);
3773 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3774 void *data
, u16 len
)
3776 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3780 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3781 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3782 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3785 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3788 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3792 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3793 MGMT_OP_USER_PASSKEY_REPLY
,
3794 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3797 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3798 void *data
, u16 len
)
3800 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3804 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3805 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3806 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3809 static void update_name(struct hci_request
*req
)
3811 struct hci_dev
*hdev
= req
->hdev
;
3812 struct hci_cp_write_local_name cp
;
3814 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3816 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3819 static void set_name_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3821 struct mgmt_cp_set_local_name
*cp
;
3822 struct mgmt_pending_cmd
*cmd
;
3824 BT_DBG("status 0x%02x", status
);
3828 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3835 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3836 mgmt_status(status
));
3838 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3841 mgmt_pending_remove(cmd
);
3844 hci_dev_unlock(hdev
);
3847 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3850 struct mgmt_cp_set_local_name
*cp
= data
;
3851 struct mgmt_pending_cmd
*cmd
;
3852 struct hci_request req
;
3859 /* If the old values are the same as the new ones just return a
3860 * direct command complete event.
3862 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3863 !memcmp(hdev
->short_name
, cp
->short_name
,
3864 sizeof(hdev
->short_name
))) {
3865 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3870 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3872 if (!hdev_is_powered(hdev
)) {
3873 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3875 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3880 err
= mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
,
3886 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3892 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3894 hci_req_init(&req
, hdev
);
3896 if (lmp_bredr_capable(hdev
)) {
3901 /* The name is stored in the scan response data and so
3902 * no need to udpate the advertising data here.
3904 if (lmp_le_capable(hdev
))
3905 update_scan_rsp_data(&req
);
3907 err
= hci_req_run(&req
, set_name_complete
);
3909 mgmt_pending_remove(cmd
);
3912 hci_dev_unlock(hdev
);
3916 static void read_local_oob_data_complete(struct hci_dev
*hdev
, u8 status
,
3917 u16 opcode
, struct sk_buff
*skb
)
3919 struct mgmt_rp_read_local_oob_data mgmt_rp
;
3920 size_t rp_size
= sizeof(mgmt_rp
);
3921 struct mgmt_pending_cmd
*cmd
;
3923 BT_DBG("%s status %u", hdev
->name
, status
);
3925 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
3929 if (status
|| !skb
) {
3930 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3931 status
? mgmt_status(status
) : MGMT_STATUS_FAILED
);
3935 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
3937 if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
3938 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
3940 if (skb
->len
< sizeof(*rp
)) {
3941 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3942 MGMT_OP_READ_LOCAL_OOB_DATA
,
3943 MGMT_STATUS_FAILED
);
3947 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
3948 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
3950 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
3952 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
3954 if (skb
->len
< sizeof(*rp
)) {
3955 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3956 MGMT_OP_READ_LOCAL_OOB_DATA
,
3957 MGMT_STATUS_FAILED
);
3961 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
3962 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
3964 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
3965 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
3968 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3969 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
3972 mgmt_pending_remove(cmd
);
3975 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3976 void *data
, u16 data_len
)
3978 struct mgmt_pending_cmd
*cmd
;
3979 struct hci_request req
;
3982 BT_DBG("%s", hdev
->name
);
3986 if (!hdev_is_powered(hdev
)) {
3987 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3988 MGMT_STATUS_NOT_POWERED
);
3992 if (!lmp_ssp_capable(hdev
)) {
3993 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3994 MGMT_STATUS_NOT_SUPPORTED
);
3998 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3999 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4004 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
4010 hci_req_init(&req
, hdev
);
4012 if (bredr_sc_enabled(hdev
))
4013 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
4015 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
4017 err
= hci_req_run_skb(&req
, read_local_oob_data_complete
);
4019 mgmt_pending_remove(cmd
);
4022 hci_dev_unlock(hdev
);
4026 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4027 void *data
, u16 len
)
4029 struct mgmt_addr_info
*addr
= data
;
4032 BT_DBG("%s ", hdev
->name
);
4034 if (!bdaddr_type_is_valid(addr
->type
))
4035 return mgmt_cmd_complete(sk
, hdev
->id
,
4036 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4037 MGMT_STATUS_INVALID_PARAMS
,
4038 addr
, sizeof(*addr
));
4042 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
4043 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
4046 if (cp
->addr
.type
!= BDADDR_BREDR
) {
4047 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4048 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4049 MGMT_STATUS_INVALID_PARAMS
,
4050 &cp
->addr
, sizeof(cp
->addr
));
4054 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4055 cp
->addr
.type
, cp
->hash
,
4056 cp
->rand
, NULL
, NULL
);
4058 status
= MGMT_STATUS_FAILED
;
4060 status
= MGMT_STATUS_SUCCESS
;
4062 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4063 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
4064 &cp
->addr
, sizeof(cp
->addr
));
4065 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
4066 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
4067 u8
*rand192
, *hash192
, *rand256
, *hash256
;
4070 if (bdaddr_type_is_le(cp
->addr
.type
)) {
4071 /* Enforce zero-valued 192-bit parameters as
4072 * long as legacy SMP OOB isn't implemented.
4074 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4075 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4076 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4077 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4078 MGMT_STATUS_INVALID_PARAMS
,
4079 addr
, sizeof(*addr
));
4086 /* In case one of the P-192 values is set to zero,
4087 * then just disable OOB data for P-192.
4089 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4090 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4094 rand192
= cp
->rand192
;
4095 hash192
= cp
->hash192
;
4099 /* In case one of the P-256 values is set to zero, then just
4100 * disable OOB data for P-256.
4102 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
4103 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
4107 rand256
= cp
->rand256
;
4108 hash256
= cp
->hash256
;
4111 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4112 cp
->addr
.type
, hash192
, rand192
,
4115 status
= MGMT_STATUS_FAILED
;
4117 status
= MGMT_STATUS_SUCCESS
;
4119 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4120 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4121 status
, &cp
->addr
, sizeof(cp
->addr
));
4123 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
4124 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
4125 MGMT_STATUS_INVALID_PARAMS
);
4129 hci_dev_unlock(hdev
);
4133 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4134 void *data
, u16 len
)
4136 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
4140 BT_DBG("%s", hdev
->name
);
4142 if (cp
->addr
.type
!= BDADDR_BREDR
)
4143 return mgmt_cmd_complete(sk
, hdev
->id
,
4144 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4145 MGMT_STATUS_INVALID_PARAMS
,
4146 &cp
->addr
, sizeof(cp
->addr
));
4150 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
4151 hci_remote_oob_data_clear(hdev
);
4152 status
= MGMT_STATUS_SUCCESS
;
4156 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
4158 status
= MGMT_STATUS_INVALID_PARAMS
;
4160 status
= MGMT_STATUS_SUCCESS
;
4163 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4164 status
, &cp
->addr
, sizeof(cp
->addr
));
4166 hci_dev_unlock(hdev
);
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY;
		return false;
	}

	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return true;
}

static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		cancel_adv_timeout(hdev);
		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}

static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
4304 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
,
4307 struct mgmt_pending_cmd
*cmd
;
4308 unsigned long timeout
;
4310 BT_DBG("status %d", status
);
4314 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
4316 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
4319 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4320 mgmt_pending_remove(cmd
);
4324 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4328 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
4330 /* If the scan involves LE scan, pick proper timeout to schedule
4331 * hdev->le_scan_disable that will stop it.
4333 switch (hdev
->discovery
.type
) {
4334 case DISCOV_TYPE_LE
:
4335 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
4337 case DISCOV_TYPE_INTERLEAVED
:
4338 /* When running simultaneous discovery, the LE scanning time
4339 * should occupy the whole discovery time sine BR/EDR inquiry
4340 * and LE scanning are scheduled by the controller.
4342 * For interleaving discovery in comparison, BR/EDR inquiry
4343 * and LE scanning are done sequentially with separate
4346 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY
, &hdev
->quirks
))
4347 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
4349 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
4351 case DISCOV_TYPE_BREDR
:
4355 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
4361 /* When service discovery is used and the controller has
4362 * a strict duplicate filter, it is important to remember
4363 * the start and duration of the scan. This is required
4364 * for restarting scanning during the discovery phase.
4366 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
,
4368 hdev
->discovery
.result_filtering
) {
4369 hdev
->discovery
.scan_start
= jiffies
;
4370 hdev
->discovery
.scan_duration
= timeout
;
4373 queue_delayed_work(hdev
->workqueue
,
4374 &hdev
->le_scan_disable
, timeout
);
4378 hci_dev_unlock(hdev
);
4381 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4382 void *data
, u16 len
)
4384 struct mgmt_cp_start_discovery
*cp
= data
;
4385 struct mgmt_pending_cmd
*cmd
;
4386 struct hci_request req
;
4390 BT_DBG("%s", hdev
->name
);
4394 if (!hdev_is_powered(hdev
)) {
4395 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4396 MGMT_STATUS_NOT_POWERED
,
4397 &cp
->type
, sizeof(cp
->type
));
4401 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4402 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4403 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4404 MGMT_STATUS_BUSY
, &cp
->type
,
4409 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, data
, len
);
4415 cmd
->cmd_complete
= generic_cmd_complete
;
4417 /* Clear the discovery filter first to free any previously
4418 * allocated memory for the UUID list.
4420 hci_discovery_filter_clear(hdev
);
4422 hdev
->discovery
.type
= cp
->type
;
4423 hdev
->discovery
.report_invalid_rssi
= false;
4425 hci_req_init(&req
, hdev
);
4427 if (!trigger_discovery(&req
, &status
)) {
4428 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4429 status
, &cp
->type
, sizeof(cp
->type
));
4430 mgmt_pending_remove(cmd
);
4434 err
= hci_req_run(&req
, start_discovery_complete
);
4436 mgmt_pending_remove(cmd
);
4440 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4443 hci_dev_unlock(hdev
);
4447 static int service_discovery_cmd_complete(struct mgmt_pending_cmd
*cmd
,
4450 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
4454 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4455 void *data
, u16 len
)
4457 struct mgmt_cp_start_service_discovery
*cp
= data
;
4458 struct mgmt_pending_cmd
*cmd
;
4459 struct hci_request req
;
4460 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
4461 u16 uuid_count
, expected_len
;
4465 BT_DBG("%s", hdev
->name
);
4469 if (!hdev_is_powered(hdev
)) {
4470 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4471 MGMT_OP_START_SERVICE_DISCOVERY
,
4472 MGMT_STATUS_NOT_POWERED
,
4473 &cp
->type
, sizeof(cp
->type
));
4477 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4478 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4479 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4480 MGMT_OP_START_SERVICE_DISCOVERY
,
4481 MGMT_STATUS_BUSY
, &cp
->type
,
4486 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
4487 if (uuid_count
> max_uuid_count
) {
4488 BT_ERR("service_discovery: too big uuid_count value %u",
4490 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4491 MGMT_OP_START_SERVICE_DISCOVERY
,
4492 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4497 expected_len
= sizeof(*cp
) + uuid_count
* 16;
4498 if (expected_len
!= len
) {
4499 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4501 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4502 MGMT_OP_START_SERVICE_DISCOVERY
,
4503 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4508 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4515 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4517 /* Clear the discovery filter first to free any previously
4518 * allocated memory for the UUID list.
4520 hci_discovery_filter_clear(hdev
);
4522 hdev
->discovery
.result_filtering
= true;
4523 hdev
->discovery
.type
= cp
->type
;
4524 hdev
->discovery
.rssi
= cp
->rssi
;
4525 hdev
->discovery
.uuid_count
= uuid_count
;
4527 if (uuid_count
> 0) {
4528 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4530 if (!hdev
->discovery
.uuids
) {
4531 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4532 MGMT_OP_START_SERVICE_DISCOVERY
,
4534 &cp
->type
, sizeof(cp
->type
));
4535 mgmt_pending_remove(cmd
);
4540 hci_req_init(&req
, hdev
);
4542 if (!trigger_discovery(&req
, &status
)) {
4543 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4544 MGMT_OP_START_SERVICE_DISCOVERY
,
4545 status
, &cp
->type
, sizeof(cp
->type
));
4546 mgmt_pending_remove(cmd
);
4550 err
= hci_req_run(&req
, start_discovery_complete
);
4552 mgmt_pending_remove(cmd
);
4556 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4559 hci_dev_unlock(hdev
);
4563 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
4565 struct mgmt_pending_cmd
*cmd
;
4567 BT_DBG("status %d", status
);
4571 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
4573 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4574 mgmt_pending_remove(cmd
);
4578 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4580 hci_dev_unlock(hdev
);
4583 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4586 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4587 struct mgmt_pending_cmd
*cmd
;
4588 struct hci_request req
;
4591 BT_DBG("%s", hdev
->name
);
4595 if (!hci_discovery_active(hdev
)) {
4596 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4597 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4598 sizeof(mgmt_cp
->type
));
4602 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4603 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4604 MGMT_STATUS_INVALID_PARAMS
,
4605 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4609 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4615 cmd
->cmd_complete
= generic_cmd_complete
;
4617 hci_req_init(&req
, hdev
);
4619 hci_stop_discovery(&req
);
4621 err
= hci_req_run(&req
, stop_discovery_complete
);
4623 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4627 mgmt_pending_remove(cmd
);
4629 /* If no HCI commands were sent we're done */
4630 if (err
== -ENODATA
) {
4631 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
4632 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4633 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4637 hci_dev_unlock(hdev
);
4641 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4644 struct mgmt_cp_confirm_name
*cp
= data
;
4645 struct inquiry_entry
*e
;
4648 BT_DBG("%s", hdev
->name
);
4652 if (!hci_discovery_active(hdev
)) {
4653 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4654 MGMT_STATUS_FAILED
, &cp
->addr
,
4659 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4661 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4662 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4667 if (cp
->name_known
) {
4668 e
->name_state
= NAME_KNOWN
;
4671 e
->name_state
= NAME_NEEDED
;
4672 hci_inquiry_cache_update_resolve(hdev
, e
);
4675 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
4676 &cp
->addr
, sizeof(cp
->addr
));
4679 hci_dev_unlock(hdev
);
4683 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4686 struct mgmt_cp_block_device
*cp
= data
;
4690 BT_DBG("%s", hdev
->name
);
4692 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4693 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
4694 MGMT_STATUS_INVALID_PARAMS
,
4695 &cp
->addr
, sizeof(cp
->addr
));
4699 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4702 status
= MGMT_STATUS_FAILED
;
4706 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4708 status
= MGMT_STATUS_SUCCESS
;
4711 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
4712 &cp
->addr
, sizeof(cp
->addr
));
4714 hci_dev_unlock(hdev
);
4719 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4722 struct mgmt_cp_unblock_device
*cp
= data
;
4726 BT_DBG("%s", hdev
->name
);
4728 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4729 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
4730 MGMT_STATUS_INVALID_PARAMS
,
4731 &cp
->addr
, sizeof(cp
->addr
));
4735 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4738 status
= MGMT_STATUS_INVALID_PARAMS
;
4742 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4744 status
= MGMT_STATUS_SUCCESS
;
4747 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
4748 &cp
->addr
, sizeof(cp
->addr
));
4750 hci_dev_unlock(hdev
);
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4790 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
4793 BT_DBG("status %d", status
);
4796 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
4799 struct cmd_lookup match
= { NULL
, hdev
};
4800 struct hci_request req
;
4802 struct adv_info
*adv_instance
;
4808 u8 mgmt_err
= mgmt_status(status
);
4810 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4811 cmd_status_rsp
, &mgmt_err
);
4815 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
4816 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
4818 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
4820 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4823 new_settings(hdev
, match
.sk
);
4828 /* If "Set Advertising" was just disabled and instance advertising was
4829 * set up earlier, then re-enable multi-instance advertising.
4831 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
4832 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) ||
4833 list_empty(&hdev
->adv_instances
))
4836 instance
= hdev
->cur_adv_instance
;
4838 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
4839 struct adv_info
, list
);
4843 instance
= adv_instance
->instance
;
4846 hci_req_init(&req
, hdev
);
4848 err
= schedule_adv_instance(&req
, instance
, true);
4851 err
= hci_req_run(&req
, enable_advertising_instance
);
4854 BT_ERR("Failed to re-configure advertising");
4857 hci_dev_unlock(hdev
);
4860 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4863 struct mgmt_mode
*cp
= data
;
4864 struct mgmt_pending_cmd
*cmd
;
4865 struct hci_request req
;
4869 BT_DBG("request for %s", hdev
->name
);
4871 status
= mgmt_le_support(hdev
);
4873 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4876 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4877 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4878 MGMT_STATUS_INVALID_PARAMS
);
4884 /* The following conditions are ones which mean that we should
4885 * not do any HCI communication but directly send a mgmt
4886 * response to user space (after toggling the flag if
4889 if (!hdev_is_powered(hdev
) ||
4890 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
4891 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
4892 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4893 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4894 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4898 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
4899 if (cp
->val
== 0x02)
4900 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4902 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4904 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
4905 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4908 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4913 err
= new_settings(hdev
, sk
);
4918 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4919 pending_find(MGMT_OP_SET_LE
, hdev
)) {
4920 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4925 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4931 hci_req_init(&req
, hdev
);
4933 if (cp
->val
== 0x02)
4934 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4936 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4938 cancel_adv_timeout(hdev
);
4941 /* Switch to instance "0" for the Set Advertising setting.
4942 * We cannot use update_[adv|scan_rsp]_data() here as the
4943 * HCI_ADVERTISING flag is not yet set.
4945 update_inst_adv_data(&req
, 0x00);
4946 update_inst_scan_rsp_data(&req
, 0x00);
4947 enable_advertising(&req
);
4949 disable_advertising(&req
);
4952 err
= hci_req_run(&req
, set_advertising_complete
);
4954 mgmt_pending_remove(cmd
);
4957 hci_dev_unlock(hdev
);
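/* Note: set_static_address() only accepts a new static address while the
 * controller is powered off, and a non-zero address must have its two most
 * significant bits set, as required for LE static random addresses.
 */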
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
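/* Note: Set Scan Parameters accepts interval and window values in the range
 * 0x0004 to 0x4000 and requires window <= interval; a running passive
 * background scan (outside of discovery) is restarted so the new values
 * take effect immediately.
 */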
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
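/* Note: fast connectable is a BR/EDR-only setting; it is rejected when
 * BR/EDR is disabled or the controller predates Bluetooth 1.2, and on a
 * powered controller it is applied through write_fast_connectable().
 */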
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5096 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
5097 void *data
, u16 len
)
5099 struct mgmt_mode
*cp
= data
;
5100 struct mgmt_pending_cmd
*cmd
;
5101 struct hci_request req
;
5104 BT_DBG("%s", hdev
->name
);
5106 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
5107 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
5108 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5109 MGMT_STATUS_NOT_SUPPORTED
);
5111 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5112 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5113 MGMT_STATUS_INVALID_PARAMS
);
5117 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
5118 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5123 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
5124 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
5129 if (!hdev_is_powered(hdev
)) {
5130 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
5131 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
5133 new_settings(hdev
, sk
);
5137 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
5144 hci_req_init(&req
, hdev
);
5146 write_fast_connectable(&req
, cp
->val
);
5148 err
= hci_req_run(&req
, fast_connectable_complete
);
5150 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5151 MGMT_STATUS_FAILED
);
5152 mgmt_pending_remove(cmd
);
5156 hci_dev_unlock(hdev
);
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5193 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
5195 struct mgmt_mode
*cp
= data
;
5196 struct mgmt_pending_cmd
*cmd
;
5197 struct hci_request req
;
5200 BT_DBG("request for %s", hdev
->name
);
5202 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
5203 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5204 MGMT_STATUS_NOT_SUPPORTED
);
5206 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5207 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5208 MGMT_STATUS_REJECTED
);
5210 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5211 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5212 MGMT_STATUS_INVALID_PARAMS
);
5216 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5217 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5221 if (!hdev_is_powered(hdev
)) {
5223 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
5224 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
5225 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
5226 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
5227 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
5230 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
5232 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5236 err
= new_settings(hdev
, sk
);
5240 /* Reject disabling when powered on */
5242 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5243 MGMT_STATUS_REJECTED
);
	/* When configuring a dual-mode controller to operate
	 * with LE only and using a static address, then switching
	 * BR/EDR back on is not allowed.
	 *
	 * Dual-mode controllers shall operate with the public
	 * address as their identity address for BR/EDR and LE. So
	 * reject the attempt to create an invalid configuration.
	 *
	 * The same restriction applies when Secure Connections
	 * has been enabled. For BR/EDR this is a controller feature
	 * while for LE it is a host stack feature. This means that
	 * switching BR/EDR back on when Secure Connections has been
	 * enabled is not a supported transaction.
	 */
5260 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5261 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
5262 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
5263 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5264 MGMT_STATUS_REJECTED
);
5269 if (pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
5270 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5275 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
5281 /* We need to flip the bit already here so that update_adv_data
5282 * generates the correct flags.
5284 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
5286 hci_req_init(&req
, hdev
);
5288 write_fast_connectable(&req
, false);
5289 __hci_update_page_scan(&req
);
5291 /* Since only the advertising data flags will change, there
5292 * is no need to update the scan response data.
5294 update_adv_data(&req
);
5296 err
= hci_req_run(&req
, set_bredr_complete
);
5298 mgmt_pending_remove(cmd
);
5301 hci_dev_unlock(hdev
);
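/* Note: Secure Connections can be off (0x00), on (0x01) or in "only" mode
 * (0x02); the completion handler below updates HCI_SC_ENABLED and
 * HCI_SC_ONLY to mirror the mode that was actually written.
 */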
5305 static void sc_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5307 struct mgmt_pending_cmd
*cmd
;
5308 struct mgmt_mode
*cp
;
5310 BT_DBG("%s status %u", hdev
->name
, status
);
5314 cmd
= pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
);
5319 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5320 mgmt_status(status
));
5328 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
5329 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5332 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5333 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5336 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5337 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5341 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5342 new_settings(hdev
, cmd
->sk
);
5345 mgmt_pending_remove(cmd
);
5347 hci_dev_unlock(hdev
);
5350 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
5351 void *data
, u16 len
)
5353 struct mgmt_mode
*cp
= data
;
5354 struct mgmt_pending_cmd
*cmd
;
5355 struct hci_request req
;
5359 BT_DBG("request for %s", hdev
->name
);
5361 if (!lmp_sc_capable(hdev
) &&
5362 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5363 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5364 MGMT_STATUS_NOT_SUPPORTED
);
5366 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5367 lmp_sc_capable(hdev
) &&
5368 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
5369 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5370 MGMT_STATUS_REJECTED
);
5372 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5373 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5374 MGMT_STATUS_INVALID_PARAMS
);
5378 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
5379 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5383 changed
= !hci_dev_test_and_set_flag(hdev
,
5385 if (cp
->val
== 0x02)
5386 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5388 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5390 changed
= hci_dev_test_and_clear_flag(hdev
,
5392 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5395 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5400 err
= new_settings(hdev
, sk
);
5405 if (pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
5406 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5413 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5414 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5415 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5419 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
5425 hci_req_init(&req
, hdev
);
5426 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
5427 err
= hci_req_run(&req
, sc_enable_complete
);
5429 mgmt_pending_remove(cmd
);
5434 hci_dev_unlock(hdev
);
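/* Note: Set Debug Keys uses 0x01 to keep debug keys around and 0x02 to also
 * actively use them for pairing, in which case SSP debug mode is enabled on
 * a powered controller via HCI_OP_WRITE_SSP_DEBUG_MODE.
 */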
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}
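/* Note: Load IRKs replaces the complete set of stored identity resolving
 * keys; every entry is validated first and the whole command is rejected
 * with Invalid Parameters if any single IRK is malformed.
 */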
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
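/* Note: long term key loading below follows the same pattern; the mgmt key
 * type distinguishes legacy (master/slave) LTKs from P-256 derived ones and
 * carries an authenticated flag that is mapped onto the SMP key type before
 * the key is stored with hci_add_ltk().
 */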
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}
5637 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5638 void *cp_data
, u16 len
)
5640 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
5641 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
5642 sizeof(struct mgmt_ltk_info
));
5643 u16 key_count
, expected_len
;
5646 BT_DBG("request for %s", hdev
->name
);
5648 if (!lmp_le_capable(hdev
))
5649 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5650 MGMT_STATUS_NOT_SUPPORTED
);
5652 key_count
= __le16_to_cpu(cp
->key_count
);
5653 if (key_count
> max_key_count
) {
5654 BT_ERR("load_ltks: too big key_count value %u", key_count
);
5655 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5656 MGMT_STATUS_INVALID_PARAMS
);
5659 expected_len
= sizeof(*cp
) + key_count
*
5660 sizeof(struct mgmt_ltk_info
);
5661 if (expected_len
!= len
) {
5662 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5664 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5665 MGMT_STATUS_INVALID_PARAMS
);
5668 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
5670 for (i
= 0; i
< key_count
; i
++) {
5671 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5673 if (!ltk_is_valid(key
))
5674 return mgmt_cmd_status(sk
, hdev
->id
,
5675 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5676 MGMT_STATUS_INVALID_PARAMS
);
5681 hci_smp_ltks_clear(hdev
);
5683 for (i
= 0; i
< key_count
; i
++) {
5684 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5685 u8 type
, addr_type
, authenticated
;
5687 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
5688 addr_type
= ADDR_LE_DEV_PUBLIC
;
5690 addr_type
= ADDR_LE_DEV_RANDOM
;
5692 switch (key
->type
) {
5693 case MGMT_LTK_UNAUTHENTICATED
:
5694 authenticated
= 0x00;
5695 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5697 case MGMT_LTK_AUTHENTICATED
:
5698 authenticated
= 0x01;
5699 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5701 case MGMT_LTK_P256_UNAUTH
:
5702 authenticated
= 0x00;
5703 type
= SMP_LTK_P256
;
5705 case MGMT_LTK_P256_AUTH
:
5706 authenticated
= 0x01;
5707 type
= SMP_LTK_P256
;
5709 case MGMT_LTK_P256_DEBUG
:
5710 authenticated
= 0x00;
5711 type
= SMP_LTK_P256_DEBUG
;
5716 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
5717 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
5721 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
5724 hci_dev_unlock(hdev
);
5729 static int conn_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5731 struct hci_conn
*conn
= cmd
->user_data
;
5732 struct mgmt_rp_get_conn_info rp
;
5735 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
5737 if (status
== MGMT_STATUS_SUCCESS
) {
5738 rp
.rssi
= conn
->rssi
;
5739 rp
.tx_power
= conn
->tx_power
;
5740 rp
.max_tx_power
= conn
->max_tx_power
;
5742 rp
.rssi
= HCI_RSSI_INVALID
;
5743 rp
.tx_power
= HCI_TX_POWER_INVALID
;
5744 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
5747 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
5748 status
, &rp
, sizeof(rp
));
5750 hci_conn_drop(conn
);
5756 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
,
5759 struct hci_cp_read_rssi
*cp
;
5760 struct mgmt_pending_cmd
*cmd
;
5761 struct hci_conn
*conn
;
5765 BT_DBG("status 0x%02x", hci_status
);
	/* Commands sent in the request are either Read RSSI or Read Transmit
	 * Power Level, so we check which one was last sent to retrieve the
	 * connection handle. Both commands have the handle as their first
	 * parameter, so it is safe to cast the data onto the same command
	 * struct.
	 *
	 * The first command sent is always Read RSSI and we fail only if it
	 * fails. Otherwise we simply override the error to indicate success,
	 * as we already remembered whether the TX power value is actually
	 * valid.
	 */
5778 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
5780 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
5781 status
= MGMT_STATUS_SUCCESS
;
5783 status
= mgmt_status(hci_status
);
5787 BT_ERR("invalid sent_cmd in conn_info response");
5791 handle
= __le16_to_cpu(cp
->handle
);
5792 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5794 BT_ERR("unknown handle (%d) in conn_info response", handle
);
5798 cmd
= pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
5802 cmd
->cmd_complete(cmd
, status
);
5803 mgmt_pending_remove(cmd
);
5806 hci_dev_unlock(hdev
);
5809 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5812 struct mgmt_cp_get_conn_info
*cp
= data
;
5813 struct mgmt_rp_get_conn_info rp
;
5814 struct hci_conn
*conn
;
5815 unsigned long conn_info_age
;
5818 BT_DBG("%s", hdev
->name
);
5820 memset(&rp
, 0, sizeof(rp
));
5821 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5822 rp
.addr
.type
= cp
->addr
.type
;
5824 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5825 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5826 MGMT_STATUS_INVALID_PARAMS
,
5831 if (!hdev_is_powered(hdev
)) {
5832 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5833 MGMT_STATUS_NOT_POWERED
, &rp
,
5838 if (cp
->addr
.type
== BDADDR_BREDR
)
5839 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5842 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
5844 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5845 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5846 MGMT_STATUS_NOT_CONNECTED
, &rp
,
5851 if (pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
5852 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5853 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
	/* To avoid the client trying to guess when to poll again for
	 * information, we calculate the connection info age as a random
	 * value between the min/max values set in hdev.
	 */
5860 conn_info_age
= hdev
->conn_info_min_age
+
5861 prandom_u32_max(hdev
->conn_info_max_age
-
5862 hdev
->conn_info_min_age
);
	/* Query the controller to refresh cached values if they are too old
	 * or were never read.
	 */
5867 if (time_after(jiffies
, conn
->conn_info_timestamp
+
5868 msecs_to_jiffies(conn_info_age
)) ||
5869 !conn
->conn_info_timestamp
) {
5870 struct hci_request req
;
5871 struct hci_cp_read_tx_power req_txp_cp
;
5872 struct hci_cp_read_rssi req_rssi_cp
;
5873 struct mgmt_pending_cmd
*cmd
;
5875 hci_req_init(&req
, hdev
);
5876 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
5877 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
5880 /* For LE links TX power does not change thus we don't need to
5881 * query for it once value is known.
5883 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5884 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
5885 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5886 req_txp_cp
.type
= 0x00;
5887 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5888 sizeof(req_txp_cp
), &req_txp_cp
);
5891 /* Max TX power needs to be read only once per connection */
5892 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5893 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5894 req_txp_cp
.type
= 0x01;
5895 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5896 sizeof(req_txp_cp
), &req_txp_cp
);
5899 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5903 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5910 hci_conn_hold(conn
);
5911 cmd
->user_data
= hci_conn_get(conn
);
5912 cmd
->cmd_complete
= conn_info_cmd_complete
;
5914 conn
->conn_info_timestamp
= jiffies
;
5916 /* Cache is valid, just reply with values cached in hci_conn */
5917 rp
.rssi
= conn
->rssi
;
5918 rp
.tx_power
= conn
->tx_power
;
5919 rp
.max_tx_power
= conn
->max_tx_power
;
5921 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5922 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5926 hci_dev_unlock(hdev
);
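/* Note: Get Clock Info reads the local clock and, when a connected BR/EDR
 * peer address is given, also the piconet clock and its accuracy via
 * HCI_OP_READ_CLOCK.
 */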
5930 static int clock_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5932 struct hci_conn
*conn
= cmd
->user_data
;
5933 struct mgmt_rp_get_clock_info rp
;
5934 struct hci_dev
*hdev
;
5937 memset(&rp
, 0, sizeof(rp
));
5938 memcpy(&rp
.addr
, &cmd
->param
, sizeof(rp
.addr
));
5943 hdev
= hci_dev_get(cmd
->index
);
5945 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
5950 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
5951 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
5955 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
,
5959 hci_conn_drop(conn
);
5966 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5968 struct hci_cp_read_clock
*hci_cp
;
5969 struct mgmt_pending_cmd
*cmd
;
5970 struct hci_conn
*conn
;
5972 BT_DBG("%s status %u", hdev
->name
, status
);
5976 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5980 if (hci_cp
->which
) {
5981 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5982 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5987 cmd
= pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5991 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5992 mgmt_pending_remove(cmd
);
5995 hci_dev_unlock(hdev
);
5998 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6001 struct mgmt_cp_get_clock_info
*cp
= data
;
6002 struct mgmt_rp_get_clock_info rp
;
6003 struct hci_cp_read_clock hci_cp
;
6004 struct mgmt_pending_cmd
*cmd
;
6005 struct hci_request req
;
6006 struct hci_conn
*conn
;
6009 BT_DBG("%s", hdev
->name
);
6011 memset(&rp
, 0, sizeof(rp
));
6012 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
6013 rp
.addr
.type
= cp
->addr
.type
;
6015 if (cp
->addr
.type
!= BDADDR_BREDR
)
6016 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
6017 MGMT_STATUS_INVALID_PARAMS
,
6022 if (!hdev_is_powered(hdev
)) {
6023 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
6024 MGMT_STATUS_NOT_POWERED
, &rp
,
6029 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6030 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
6032 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
6033 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6034 MGMT_OP_GET_CLOCK_INFO
,
6035 MGMT_STATUS_NOT_CONNECTED
,
6043 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
6049 cmd
->cmd_complete
= clock_info_cmd_complete
;
6051 hci_req_init(&req
, hdev
);
6053 memset(&hci_cp
, 0, sizeof(hci_cp
));
6054 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
6057 hci_conn_hold(conn
);
6058 cmd
->user_data
= hci_conn_get(conn
);
6060 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
6061 hci_cp
.which
= 0x01; /* Piconet clock */
6062 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
6065 err
= hci_req_run(&req
, get_clock_info_complete
);
6067 mgmt_pending_remove(cmd
);
6070 hci_dev_unlock(hdev
);
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
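/* Note: Add Device either whitelists a BR/EDR address (action 0x01) or
 * configures LE auto-connect behaviour: action 0x02 maps to
 * HCI_AUTO_CONN_ALWAYS, 0x01 to HCI_AUTO_CONN_DIRECT and 0x00 to
 * HCI_AUTO_CONN_REPORT.
 */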
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6164 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
6165 void *data
, u16 len
)
6167 struct mgmt_cp_add_device
*cp
= data
;
6168 struct mgmt_pending_cmd
*cmd
;
6169 struct hci_request req
;
6170 u8 auto_conn
, addr_type
;
6173 BT_DBG("%s", hdev
->name
);
6175 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
6176 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
6177 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6178 MGMT_STATUS_INVALID_PARAMS
,
6179 &cp
->addr
, sizeof(cp
->addr
));
6181 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
6182 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6183 MGMT_STATUS_INVALID_PARAMS
,
6184 &cp
->addr
, sizeof(cp
->addr
));
6186 hci_req_init(&req
, hdev
);
6190 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_DEVICE
, hdev
, data
, len
);
6196 cmd
->cmd_complete
= addr_cmd_complete
;
6198 if (cp
->addr
.type
== BDADDR_BREDR
) {
6199 /* Only incoming connections action is supported for now */
6200 if (cp
->action
!= 0x01) {
6201 err
= cmd
->cmd_complete(cmd
,
6202 MGMT_STATUS_INVALID_PARAMS
);
6203 mgmt_pending_remove(cmd
);
6207 err
= hci_bdaddr_list_add(&hdev
->whitelist
, &cp
->addr
.bdaddr
,
6212 __hci_update_page_scan(&req
);
6217 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
6218 addr_type
= ADDR_LE_DEV_PUBLIC
;
6220 addr_type
= ADDR_LE_DEV_RANDOM
;
6222 if (cp
->action
== 0x02)
6223 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
6224 else if (cp
->action
== 0x01)
6225 auto_conn
= HCI_AUTO_CONN_DIRECT
;
6227 auto_conn
= HCI_AUTO_CONN_REPORT
;
6229 /* If the connection parameters don't exist for this device,
6230 * they will be created and configured with defaults.
6232 if (hci_conn_params_set(&req
, &cp
->addr
.bdaddr
, addr_type
,
6234 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_FAILED
);
6235 mgmt_pending_remove(cmd
);
6240 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
6242 err
= hci_req_run(&req
, add_device_complete
);
6244 /* ENODATA means no HCI commands were needed (e.g. if
6245 * the adapter is powered off).
6247 if (err
== -ENODATA
)
6248 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_SUCCESS
);
6249 mgmt_pending_remove(cmd
);
6253 hci_dev_unlock(hdev
);
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6287 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
6288 void *data
, u16 len
)
6290 struct mgmt_cp_remove_device
*cp
= data
;
6291 struct mgmt_pending_cmd
*cmd
;
6292 struct hci_request req
;
6295 BT_DBG("%s", hdev
->name
);
6297 hci_req_init(&req
, hdev
);
6301 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_DEVICE
, hdev
, data
, len
);
6307 cmd
->cmd_complete
= addr_cmd_complete
;
6309 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6310 struct hci_conn_params
*params
;
6313 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
6314 err
= cmd
->cmd_complete(cmd
,
6315 MGMT_STATUS_INVALID_PARAMS
);
6316 mgmt_pending_remove(cmd
);
6320 if (cp
->addr
.type
== BDADDR_BREDR
) {
6321 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
6325 err
= cmd
->cmd_complete(cmd
,
6326 MGMT_STATUS_INVALID_PARAMS
);
6327 mgmt_pending_remove(cmd
);
6331 __hci_update_page_scan(&req
);
6333 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
6338 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
6339 addr_type
= ADDR_LE_DEV_PUBLIC
;
6341 addr_type
= ADDR_LE_DEV_RANDOM
;
6343 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
6346 err
= cmd
->cmd_complete(cmd
,
6347 MGMT_STATUS_INVALID_PARAMS
);
6348 mgmt_pending_remove(cmd
);
6352 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
) {
6353 err
= cmd
->cmd_complete(cmd
,
6354 MGMT_STATUS_INVALID_PARAMS
);
6355 mgmt_pending_remove(cmd
);
6359 list_del(¶ms
->action
);
6360 list_del(¶ms
->list
);
6362 __hci_update_background_scan(&req
);
6364 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
6366 struct hci_conn_params
*p
, *tmp
;
6367 struct bdaddr_list
*b
, *btmp
;
6369 if (cp
->addr
.type
) {
6370 err
= cmd
->cmd_complete(cmd
,
6371 MGMT_STATUS_INVALID_PARAMS
);
6372 mgmt_pending_remove(cmd
);
6376 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
6377 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
6382 __hci_update_page_scan(&req
);
6384 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
6385 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
6387 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
6388 list_del(&p
->action
);
6393 BT_DBG("All LE connection parameters were removed");
6395 __hci_update_background_scan(&req
);
6399 err
= hci_req_run(&req
, remove_device_complete
);
6401 /* ENODATA means no HCI commands were needed (e.g. if
6402 * the adapter is powered off).
6404 if (err
== -ENODATA
)
6405 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_SUCCESS
);
6406 mgmt_pending_remove(cmd
);
6410 hci_dev_unlock(hdev
);
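/* Note: Load Connection Parameters validates the buffer length against the
 * declared parameter count, clears all previously disabled parameters and
 * silently skips individual entries with an invalid address type or
 * out-of-range connection parameters.
 */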
6414 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6417 struct mgmt_cp_load_conn_param
*cp
= data
;
6418 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
6419 sizeof(struct mgmt_conn_param
));
6420 u16 param_count
, expected_len
;
6423 if (!lmp_le_capable(hdev
))
6424 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6425 MGMT_STATUS_NOT_SUPPORTED
);
6427 param_count
= __le16_to_cpu(cp
->param_count
);
6428 if (param_count
> max_param_count
) {
6429 BT_ERR("load_conn_param: too big param_count value %u",
6431 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6432 MGMT_STATUS_INVALID_PARAMS
);
6435 expected_len
= sizeof(*cp
) + param_count
*
6436 sizeof(struct mgmt_conn_param
);
6437 if (expected_len
!= len
) {
6438 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6440 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6441 MGMT_STATUS_INVALID_PARAMS
);
6444 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
6448 hci_conn_params_clear_disabled(hdev
);
6450 for (i
= 0; i
< param_count
; i
++) {
6451 struct mgmt_conn_param
*param
= &cp
->params
[i
];
6452 struct hci_conn_params
*hci_param
;
6453 u16 min
, max
, latency
, timeout
;
6456 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
6459 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
6460 addr_type
= ADDR_LE_DEV_PUBLIC
;
6461 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
6462 addr_type
= ADDR_LE_DEV_RANDOM
;
6464 BT_ERR("Ignoring invalid connection parameters");
6468 min
= le16_to_cpu(param
->min_interval
);
6469 max
= le16_to_cpu(param
->max_interval
);
6470 latency
= le16_to_cpu(param
->latency
);
6471 timeout
= le16_to_cpu(param
->timeout
);
6473 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6474 min
, max
, latency
, timeout
);
6476 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
6477 BT_ERR("Ignoring invalid connection parameters");
6481 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
6484 BT_ERR("Failed to add connection parameters");
6488 hci_param
->conn_min_interval
= min
;
6489 hci_param
->conn_max_interval
= max
;
6490 hci_param
->conn_latency
= latency
;
6491 hci_param
->supervision_timeout
= timeout
;
6494 hci_dev_unlock(hdev
);
6496 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0,
6500 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
6501 void *data
, u16 len
)
6503 struct mgmt_cp_set_external_config
*cp
= data
;
6507 BT_DBG("%s", hdev
->name
);
6509 if (hdev_is_powered(hdev
))
6510 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6511 MGMT_STATUS_REJECTED
);
6513 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
6514 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6515 MGMT_STATUS_INVALID_PARAMS
);
6517 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
6518 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6519 MGMT_STATUS_NOT_SUPPORTED
);
6524 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_EXT_CONFIGURED
);
6526 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_EXT_CONFIGURED
);
6528 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
6535 err
= new_options(hdev
, sk
);
6537 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) == is_configured(hdev
)) {
6538 mgmt_index_removed(hdev
);
6540 if (hci_dev_test_and_change_flag(hdev
, HCI_UNCONFIGURED
)) {
6541 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6542 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6544 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6546 set_bit(HCI_RAW
, &hdev
->flags
);
6547 mgmt_index_added(hdev
);
6552 hci_dev_unlock(hdev
);
6556 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
6557 void *data
, u16 len
)
6559 struct mgmt_cp_set_public_address
*cp
= data
;
6563 BT_DBG("%s", hdev
->name
);
6565 if (hdev_is_powered(hdev
))
6566 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6567 MGMT_STATUS_REJECTED
);
6569 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
6570 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6571 MGMT_STATUS_INVALID_PARAMS
);
6573 if (!hdev
->set_bdaddr
)
6574 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6575 MGMT_STATUS_NOT_SUPPORTED
);
6579 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
6580 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
6582 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
6589 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
6590 err
= new_options(hdev
, sk
);
6592 if (is_configured(hdev
)) {
6593 mgmt_index_removed(hdev
);
6595 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
6597 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6598 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6600 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6604 hci_dev_unlock(hdev
);
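/* Note: eir_append_data() below emits one length/type/value triplet in
 * EIR/advertising data format and returns the updated total length.
 */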
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
6619 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
6620 u16 opcode
, struct sk_buff
*skb
)
6622 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
6623 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
6624 u8
*h192
, *r192
, *h256
, *r256
;
6625 struct mgmt_pending_cmd
*cmd
;
6629 BT_DBG("%s status %u", hdev
->name
, status
);
6631 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
6635 mgmt_cp
= cmd
->param
;
6638 status
= mgmt_status(status
);
6645 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
6646 struct hci_rp_read_local_oob_data
*rp
;
6648 if (skb
->len
!= sizeof(*rp
)) {
6649 status
= MGMT_STATUS_FAILED
;
6652 status
= MGMT_STATUS_SUCCESS
;
6653 rp
= (void *)skb
->data
;
6655 eir_len
= 5 + 18 + 18;
6662 struct hci_rp_read_local_oob_ext_data
*rp
;
6664 if (skb
->len
!= sizeof(*rp
)) {
6665 status
= MGMT_STATUS_FAILED
;
6668 status
= MGMT_STATUS_SUCCESS
;
6669 rp
= (void *)skb
->data
;
6671 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
6672 eir_len
= 5 + 18 + 18;
6676 eir_len
= 5 + 18 + 18 + 18 + 18;
6686 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
6693 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
6694 hdev
->dev_class
, 3);
6697 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6698 EIR_SSP_HASH_C192
, h192
, 16);
6699 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6700 EIR_SSP_RAND_R192
, r192
, 16);
6704 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6705 EIR_SSP_HASH_C256
, h256
, 16);
6706 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6707 EIR_SSP_RAND_R256
, r256
, 16);
6711 mgmt_rp
->type
= mgmt_cp
->type
;
6712 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
6714 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
6715 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
6716 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
6717 if (err
< 0 || status
)
6720 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6722 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6723 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
6724 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
6727 mgmt_pending_remove(cmd
);
6730 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
6731 struct mgmt_cp_read_local_oob_ext_data
*cp
)
6733 struct mgmt_pending_cmd
*cmd
;
6734 struct hci_request req
;
6737 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
6742 hci_req_init(&req
, hdev
);
6744 if (bredr_sc_enabled(hdev
))
6745 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
6747 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
6749 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
6751 mgmt_pending_remove(cmd
);
6758 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
6759 void *data
, u16 data_len
)
6761 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
6762 struct mgmt_rp_read_local_oob_ext_data
*rp
;
6765 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
6768 BT_DBG("%s", hdev
->name
);
6770 if (hdev_is_powered(hdev
)) {
6772 case BIT(BDADDR_BREDR
):
6773 status
= mgmt_bredr_support(hdev
);
6779 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6780 status
= mgmt_le_support(hdev
);
6784 eir_len
= 9 + 3 + 18 + 18 + 3;
6787 status
= MGMT_STATUS_INVALID_PARAMS
;
6792 status
= MGMT_STATUS_NOT_POWERED
;
6796 rp_len
= sizeof(*rp
) + eir_len
;
6797 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6808 case BIT(BDADDR_BREDR
):
6809 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
6810 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
6811 hci_dev_unlock(hdev
);
6815 status
= MGMT_STATUS_FAILED
;
6818 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6820 hdev
->dev_class
, 3);
6823 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6824 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
6825 smp_generate_oob(hdev
, hash
, rand
) < 0) {
6826 hci_dev_unlock(hdev
);
6827 status
= MGMT_STATUS_FAILED
;
6831 /* This should return the active RPA, but since the RPA
6832 * is only programmed on demand, it is really hard to fill
6833 * this in at the moment. For now disallow retrieving
6834 * local out-of-band data when privacy is in use.
6836 * Returning the identity address will not help here since
6837 * pairing happens before the identity resolving key is
6838 * known and thus the connection establishment happens
6839 * based on the RPA and not the identity address.
6841 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
6842 hci_dev_unlock(hdev
);
6843 status
= MGMT_STATUS_REJECTED
;
6847 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
6848 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
6849 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
6850 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
6851 memcpy(addr
, &hdev
->static_addr
, 6);
6854 memcpy(addr
, &hdev
->bdaddr
, 6);
6858 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
6859 addr
, sizeof(addr
));
6861 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
6866 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
6867 &role
, sizeof(role
));
6869 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
6870 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6872 hash
, sizeof(hash
));
6874 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6876 rand
, sizeof(rand
));
6879 flags
= get_adv_discov_flags(hdev
);
6881 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
6882 flags
|= LE_AD_NO_BREDR
;
6884 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
6885 &flags
, sizeof(flags
));
6889 hci_dev_unlock(hdev
);
6891 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6893 status
= MGMT_STATUS_SUCCESS
;
6896 rp
->type
= cp
->type
;
6897 rp
->eir_len
= cpu_to_le16(eir_len
);
6899 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
6900 status
, rp
, sizeof(*rp
) + eir_len
);
6901 if (err
< 0 || status
)
6904 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6905 rp
, sizeof(*rp
) + eir_len
,
6906 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
		flags |= MGMT_ADV_FLAG_TX_POWER;

	return flags;
}
*sk
, struct hci_dev
*hdev
,
6930 void *data
, u16 data_len
)
6932 struct mgmt_rp_read_adv_features
*rp
;
6936 struct adv_info
*adv_instance
;
6937 u32 supported_flags
;
6939 BT_DBG("%s", hdev
->name
);
6941 if (!lmp_le_capable(hdev
))
6942 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6943 MGMT_STATUS_REJECTED
);
6947 rp_len
= sizeof(*rp
);
6949 instance
= hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6951 rp_len
+= hdev
->adv_instance_cnt
;
6953 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6955 hci_dev_unlock(hdev
);
6959 supported_flags
= get_supported_adv_flags(hdev
);
6961 rp
->supported_flags
= cpu_to_le32(supported_flags
);
6962 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
6963 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
6964 rp
->max_instances
= HCI_MAX_ADV_INSTANCES
;
6968 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
6969 if (i
>= hdev
->adv_instance_cnt
)
6972 rp
->instance
[i
] = adv_instance
->instance
;
6975 rp
->num_instances
= hdev
->adv_instance_cnt
;
6977 rp
->num_instances
= 0;
6980 hci_dev_unlock(hdev
);
6982 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6983 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
6990 static bool tlv_data_is_valid(struct hci_dev
*hdev
, u32 adv_flags
, u8
*data
,
6991 u8 len
, bool is_adv_data
)
6993 u8 max_len
= HCI_MAX_AD_LENGTH
;
6995 bool flags_managed
= false;
6996 bool tx_power_managed
= false;
6997 u32 flags_params
= MGMT_ADV_FLAG_DISCOV
| MGMT_ADV_FLAG_LIMITED_DISCOV
|
6998 MGMT_ADV_FLAG_MANAGED_FLAGS
;
7000 if (is_adv_data
&& (adv_flags
& flags_params
)) {
7001 flags_managed
= true;
7005 if (is_adv_data
&& (adv_flags
& MGMT_ADV_FLAG_TX_POWER
)) {
7006 tx_power_managed
= true;
7013 /* Make sure that the data is correctly formatted. */
7014 for (i
= 0, cur_len
= 0; i
< len
; i
+= (cur_len
+ 1)) {
7017 if (flags_managed
&& data
[i
+ 1] == EIR_FLAGS
)
7020 if (tx_power_managed
&& data
[i
+ 1] == EIR_TX_POWER
)
7023 /* If the current field length would exceed the total data
7024 * length, then it's invalid.
7026 if (i
+ cur_len
>= len
)
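/* Note: when enabling an advertising instance fails, the completion handler
 * below drops all still-pending instances and clears
 * HCI_ADVERTISING_INSTANCE before reporting the status to user space; on
 * success the pending instances are simply marked as committed.
 */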
7033 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7036 struct mgmt_pending_cmd
*cmd
;
7037 struct mgmt_cp_add_advertising
*cp
;
7038 struct mgmt_rp_add_advertising rp
;
7039 struct adv_info
*adv_instance
, *n
;
7042 BT_DBG("status %d", status
);
7046 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
7049 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
7051 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
7052 if (!adv_instance
->pending
)
7056 adv_instance
->pending
= false;
7060 instance
= adv_instance
->instance
;
7062 if (hdev
->cur_adv_instance
== instance
)
7063 cancel_adv_timeout(hdev
);
7065 hci_remove_adv_instance(hdev
, instance
);
7066 advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, instance
);
7073 rp
.instance
= cp
->instance
;
7076 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7077 mgmt_status(status
));
7079 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7080 mgmt_status(status
), &rp
, sizeof(rp
));
7082 mgmt_pending_remove(cmd
);
7085 hci_dev_unlock(hdev
);
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
{
	u8 instance;
	struct hci_request req;

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		return;

	hci_dev_lock(hdev);
	hci_req_init(&req, hdev);

	clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		disable_advertising(&req);

	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
}
7113 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7114 void *data
, u16 data_len
)
7116 struct mgmt_cp_add_advertising
*cp
= data
;
7117 struct mgmt_rp_add_advertising rp
;
7119 u32 supported_flags
;
7121 u16 timeout
, duration
;
7122 unsigned int prev_instance_cnt
= hdev
->adv_instance_cnt
;
7123 u8 schedule_instance
= 0;
7124 struct adv_info
*next_instance
;
7126 struct mgmt_pending_cmd
*cmd
;
7127 struct hci_request req
;
7129 BT_DBG("%s", hdev
->name
);
7131 status
= mgmt_le_support(hdev
);
7133 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7136 flags
= __le32_to_cpu(cp
->flags
);
7137 timeout
= __le16_to_cpu(cp
->timeout
);
7138 duration
= __le16_to_cpu(cp
->duration
);
7140 /* The current implementation only supports a subset of the specified
7143 supported_flags
= get_supported_adv_flags(hdev
);
7144 if (flags
& ~supported_flags
)
7145 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7146 MGMT_STATUS_INVALID_PARAMS
);
7150 if (timeout
&& !hdev_is_powered(hdev
)) {
7151 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7152 MGMT_STATUS_REJECTED
);
7156 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7157 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7158 pending_find(MGMT_OP_SET_LE
, hdev
)) {
7159 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7164 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
7165 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
7166 cp
->scan_rsp_len
, false)) {
7167 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7168 MGMT_STATUS_INVALID_PARAMS
);
7172 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
7173 cp
->adv_data_len
, cp
->data
,
7175 cp
->data
+ cp
->adv_data_len
,
7178 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7179 MGMT_STATUS_FAILED
);
7183 /* Only trigger an advertising added event if a new instance was
7186 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
7187 advertising_added(sk
, hdev
, cp
->instance
);
7189 hci_dev_set_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
7191 if (hdev
->cur_adv_instance
== cp
->instance
) {
7192 /* If the currently advertised instance is being changed then
7193 * cancel the current advertising and schedule the next
7194 * instance. If there is only one instance then the overridden
7195 * advertising data will be visible right away.
7197 cancel_adv_timeout(hdev
);
7199 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
7201 schedule_instance
= next_instance
->instance
;
7202 } else if (!hdev
->adv_instance_timeout
) {
7203 /* Immediately advertise the new instance if no other
7204 * instance is currently being advertised.
7206 schedule_instance
= cp
->instance
;
7209 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7210 * there is no instance to be advertised then we have no HCI
7211 * communication to make. Simply return.
7213 if (!hdev_is_powered(hdev
) ||
7214 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
7215 !schedule_instance
) {
7216 rp
.instance
= cp
->instance
;
7217 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7218 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7222 /* We're good to go, update advertising data, parameters, and start
7225 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
7232 hci_req_init(&req
, hdev
);
7234 err
= schedule_adv_instance(&req
, schedule_instance
, true);
7237 err
= hci_req_run(&req
, add_advertising_complete
);
7240 mgmt_pending_remove(cmd
);
7243 hci_dev_unlock(hdev
);
7248 static void remove_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7251 struct mgmt_pending_cmd
*cmd
;
7252 struct mgmt_cp_remove_advertising
*cp
;
7253 struct mgmt_rp_remove_advertising rp
;
7255 BT_DBG("status %d", status
);
7259 /* A failure status here only means that we failed to disable
7260 * advertising. Otherwise, the advertising instance has been removed,
7261 * so report success.
7263 cmd
= pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
);
7268 rp
.instance
= cp
->instance
;
7270 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, MGMT_STATUS_SUCCESS
,
7272 mgmt_pending_remove(cmd
);
7275 hci_dev_unlock(hdev
);
7278 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7279 void *data
, u16 data_len
)
7281 struct mgmt_cp_remove_advertising
*cp
= data
;
7282 struct mgmt_rp_remove_advertising rp
;
7283 struct mgmt_pending_cmd
*cmd
;
7284 struct hci_request req
;
7287 BT_DBG("%s", hdev
->name
);
7291 if (cp
->instance
&& !hci_find_adv_instance(hdev
, cp
->instance
)) {
7292 err
= mgmt_cmd_status(sk
, hdev
->id
,
7293 MGMT_OP_REMOVE_ADVERTISING
,
7294 MGMT_STATUS_INVALID_PARAMS
);
7298 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7299 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7300 pending_find(MGMT_OP_SET_LE
, hdev
)) {
7301 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7306 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
)) {
7307 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7308 MGMT_STATUS_INVALID_PARAMS
);
7312 hci_req_init(&req
, hdev
);
7314 clear_adv_instance(hdev
, &req
, cp
->instance
, true);
7316 if (list_empty(&hdev
->adv_instances
))
7317 disable_advertising(&req
);
7319 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7320 * flag is set or the device isn't powered then we have no HCI
7321 * communication to make. Simply return.
7323 if (skb_queue_empty(&req
.cmd_q
) ||
7324 !hdev_is_powered(hdev
) ||
7325 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
7326 rp
.instance
= cp
->instance
;
7327 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7328 MGMT_OP_REMOVE_ADVERTISING
,
7329 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7333 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
7340 err
= hci_req_run(&req
, remove_advertising_complete
);
7342 mgmt_pending_remove(cmd
);
7345 hci_dev_unlock(hdev
);
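/* Note: the handler table below maps each mgmt opcode to its handler and its
 * expected parameter size; entries marked HCI_MGMT_UNTRUSTED may be issued
 * by untrusted sockets and HCI_MGMT_UNCONFIGURED ones are allowed on
 * controllers that are not yet configured.
 */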
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, MGMT_READ_VERSION_SIZE,
				HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
	{ read_commands, MGMT_READ_COMMANDS_SIZE,
				HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
	{ read_index_list, MGMT_READ_INDEX_LIST_SIZE,
				HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
	{ read_controller_info, MGMT_READ_INFO_SIZE,
				HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
				HCI_MGMT_VAR_LEN },
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
				HCI_MGMT_VAR_LEN },
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
				HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
				HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
				HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
				HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
				HCI_MGMT_UNCONFIGURED | HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
				HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
				HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
				HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
				HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
				HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
};
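/* Controller index lifecycle notifications. A configured BR/EDR controller
 * is announced with the Index Added/Removed events, an unconfigured one
 * with the Unconfigured Index variants, and both cases are additionally
 * mirrored as Extended Index events carrying the controller type and bus
 * for sockets that subscribed to them.
 */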
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
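/* After a power cycle the auto-connect action lists are rebuilt from the
 * stored connection parameters: direct/always entries go back onto
 * pend_le_conns and report entries onto pend_le_reports, and the
 * background scan is refreshed as part of the same request.
 */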
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	__hci_update_background_scan(req);
}
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
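/* Bring the controller in sync with the host-managed settings right after
 * power on: SSP and Secure Connections host support, LE host support,
 * advertising data and instances, authentication and page scan. The result
 * of hci_req_run() is returned, so a return of 0 means HCI commands were
 * queued and powered_complete() will finish the job.
 */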
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	struct adv_info *adv_instance;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
		    (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		     !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
		    hdev->cur_adv_instance == 0x00 &&
		    !list_empty(&hdev->adv_instances)) {
			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			hdev->cur_adv_instance = adv_instance->instance;
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			enable_advertising(&req);
		else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
			 hdev->cur_adv_instance)
			schedule_adv_instance(&req, hdev->cur_adv_instance,
					      true);

		restart_le_actions(&req);
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
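/* A failed Set Powered command is answered with a status derived from the
 * error code: -ERFKILL maps to MGMT_STATUS_RFKILLED, everything else to
 * MGMT_STATUS_FAILED.
 */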
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}
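/* Note on the address check below: for random addresses the two most
 * significant bits of the most significant byte (b[5], since bdaddr_t is
 * stored little endian) are both set only for static random addresses.
 * Resolvable and non-resolvable private addresses fail the 0xc0 test, so
 * keys for such devices are flagged as not worth storing.
 */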
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
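/* The Device Connected event carries EIR-formatted data. For LE links the
 * advertising data received from the remote device is copied verbatim;
 * otherwise the remote name and, if known, the class of device are
 * appended as EIR fields.
 */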
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
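/* Returns true only when a Set Powered command turning the controller off
 * is still pending, i.e. userspace initiated the power down and the final
 * power_off work may need to be flushed early.
 */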
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							   HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
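/* UUID filter helpers for service discovery: 16-bit and 32-bit UUIDs found
 * in EIR data are expanded to 128 bits by overlaying them on the Bluetooth
 * Base UUID (bluetooth_base_uuid, defined earlier in this file) before
 * being compared against the filter list.
 */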
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
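/* With the strict duplicate filter quirk the controller does not report
 * RSSI changes for already seen devices, so the LE scan is restarted to
 * refresh results. The restart is skipped when the remaining scan time is
 * shorter than the restart delay.
 */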
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
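/* Re-enable advertising, e.g. after a connection that stopped it: if an
 * advertising instance is in use it gets rescheduled, otherwise the
 * advertising and scan response data are refreshed and plain advertising
 * is enabled again.
 */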
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		schedule_adv_instance(&req, instance, true);
	} else {
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}
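/* The management interface is exposed as the HCI control channel; the
 * handler table above is indexed by management opcode and registered with
 * the HCI core below.
 */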
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}