2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
43 static const u16 mgmt_commands
[] = {
44 MGMT_OP_READ_INDEX_LIST
,
47 MGMT_OP_SET_DISCOVERABLE
,
48 MGMT_OP_SET_CONNECTABLE
,
49 MGMT_OP_SET_FAST_CONNECTABLE
,
51 MGMT_OP_SET_LINK_SECURITY
,
55 MGMT_OP_SET_DEV_CLASS
,
56 MGMT_OP_SET_LOCAL_NAME
,
59 MGMT_OP_LOAD_LINK_KEYS
,
60 MGMT_OP_LOAD_LONG_TERM_KEYS
,
62 MGMT_OP_GET_CONNECTIONS
,
63 MGMT_OP_PIN_CODE_REPLY
,
64 MGMT_OP_PIN_CODE_NEG_REPLY
,
65 MGMT_OP_SET_IO_CAPABILITY
,
67 MGMT_OP_CANCEL_PAIR_DEVICE
,
68 MGMT_OP_UNPAIR_DEVICE
,
69 MGMT_OP_USER_CONFIRM_REPLY
,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
71 MGMT_OP_USER_PASSKEY_REPLY
,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
73 MGMT_OP_READ_LOCAL_OOB_DATA
,
74 MGMT_OP_ADD_REMOTE_OOB_DATA
,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
76 MGMT_OP_START_DISCOVERY
,
77 MGMT_OP_STOP_DISCOVERY
,
80 MGMT_OP_UNBLOCK_DEVICE
,
81 MGMT_OP_SET_DEVICE_ID
,
82 MGMT_OP_SET_ADVERTISING
,
84 MGMT_OP_SET_STATIC_ADDRESS
,
85 MGMT_OP_SET_SCAN_PARAMS
,
86 MGMT_OP_SET_SECURE_CONN
,
87 MGMT_OP_SET_DEBUG_KEYS
,
90 MGMT_OP_GET_CONN_INFO
,
91 MGMT_OP_GET_CLOCK_INFO
,
93 MGMT_OP_REMOVE_DEVICE
,
94 MGMT_OP_LOAD_CONN_PARAM
,
95 MGMT_OP_READ_UNCONF_INDEX_LIST
,
96 MGMT_OP_READ_CONFIG_INFO
,
97 MGMT_OP_SET_EXTERNAL_CONFIG
,
98 MGMT_OP_SET_PUBLIC_ADDRESS
,
99 MGMT_OP_START_SERVICE_DISCOVERY
,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
101 MGMT_OP_READ_EXT_INDEX_LIST
,
102 MGMT_OP_READ_ADV_FEATURES
,
103 MGMT_OP_ADD_ADVERTISING
,
104 MGMT_OP_REMOVE_ADVERTISING
,
107 static const u16 mgmt_events
[] = {
108 MGMT_EV_CONTROLLER_ERROR
,
110 MGMT_EV_INDEX_REMOVED
,
111 MGMT_EV_NEW_SETTINGS
,
112 MGMT_EV_CLASS_OF_DEV_CHANGED
,
113 MGMT_EV_LOCAL_NAME_CHANGED
,
114 MGMT_EV_NEW_LINK_KEY
,
115 MGMT_EV_NEW_LONG_TERM_KEY
,
116 MGMT_EV_DEVICE_CONNECTED
,
117 MGMT_EV_DEVICE_DISCONNECTED
,
118 MGMT_EV_CONNECT_FAILED
,
119 MGMT_EV_PIN_CODE_REQUEST
,
120 MGMT_EV_USER_CONFIRM_REQUEST
,
121 MGMT_EV_USER_PASSKEY_REQUEST
,
123 MGMT_EV_DEVICE_FOUND
,
125 MGMT_EV_DEVICE_BLOCKED
,
126 MGMT_EV_DEVICE_UNBLOCKED
,
127 MGMT_EV_DEVICE_UNPAIRED
,
128 MGMT_EV_PASSKEY_NOTIFY
,
131 MGMT_EV_DEVICE_ADDED
,
132 MGMT_EV_DEVICE_REMOVED
,
133 MGMT_EV_NEW_CONN_PARAM
,
134 MGMT_EV_UNCONF_INDEX_ADDED
,
135 MGMT_EV_UNCONF_INDEX_REMOVED
,
136 MGMT_EV_NEW_CONFIG_OPTIONS
,
137 MGMT_EV_EXT_INDEX_ADDED
,
138 MGMT_EV_EXT_INDEX_REMOVED
,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED
,
140 MGMT_EV_ADVERTISING_ADDED
,
141 MGMT_EV_ADVERTISING_REMOVED
,
144 static const u16 mgmt_untrusted_commands
[] = {
145 MGMT_OP_READ_INDEX_LIST
,
147 MGMT_OP_READ_UNCONF_INDEX_LIST
,
148 MGMT_OP_READ_CONFIG_INFO
,
149 MGMT_OP_READ_EXT_INDEX_LIST
,
152 static const u16 mgmt_untrusted_events
[] = {
154 MGMT_EV_INDEX_REMOVED
,
155 MGMT_EV_NEW_SETTINGS
,
156 MGMT_EV_CLASS_OF_DEV_CHANGED
,
157 MGMT_EV_LOCAL_NAME_CHANGED
,
158 MGMT_EV_UNCONF_INDEX_ADDED
,
159 MGMT_EV_UNCONF_INDEX_REMOVED
,
160 MGMT_EV_NEW_CONFIG_OPTIONS
,
161 MGMT_EV_EXT_INDEX_ADDED
,
162 MGMT_EV_EXT_INDEX_REMOVED
,
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table
[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
175 MGMT_STATUS_FAILED
, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
180 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY
, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED
, /* Rejected Security */
187 MGMT_STATUS_REJECTED
, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
195 MGMT_STATUS_BUSY
, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED
, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED
, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED
, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED
, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY
, /* Role Switch Pending */
221 MGMT_STATUS_FAILED
, /* Slot Violation */
222 MGMT_STATUS_FAILED
, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY
, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
235 static u8
mgmt_status(u8 hci_status
)
237 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
238 return mgmt_status_table
[hci_status
];
240 return MGMT_STATUS_FAILED
;
243 static int mgmt_index_event(u16 event
, struct hci_dev
*hdev
, void *data
,
246 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
250 static int mgmt_limited_event(u16 event
, struct hci_dev
*hdev
, void *data
,
251 u16 len
, int flag
, struct sock
*skip_sk
)
253 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
/* Broadcast a management event on the HCI control channel to sockets
 * flagged with HCI_MGMT_GENERIC_EVENTS, skipping @skip_sk (presumably
 * the socket that triggered the event — confirm against mgmt_send_event).
 */
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}
/* Broadcast a management event on the HCI control channel, restricted
 * to trusted sockets (HCI_SOCK_TRUSTED), skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
271 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
274 struct mgmt_rp_read_version rp
;
276 BT_DBG("sock %p", sk
);
278 rp
.version
= MGMT_VERSION
;
279 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
281 return mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0,
285 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
288 struct mgmt_rp_read_commands
*rp
;
289 u16 num_commands
, num_events
;
293 BT_DBG("sock %p", sk
);
295 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
296 num_commands
= ARRAY_SIZE(mgmt_commands
);
297 num_events
= ARRAY_SIZE(mgmt_events
);
299 num_commands
= ARRAY_SIZE(mgmt_untrusted_commands
);
300 num_events
= ARRAY_SIZE(mgmt_untrusted_events
);
303 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
305 rp
= kmalloc(rp_size
, GFP_KERNEL
);
309 rp
->num_commands
= cpu_to_le16(num_commands
);
310 rp
->num_events
= cpu_to_le16(num_events
);
312 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
313 __le16
*opcode
= rp
->opcodes
;
315 for (i
= 0; i
< num_commands
; i
++, opcode
++)
316 put_unaligned_le16(mgmt_commands
[i
], opcode
);
318 for (i
= 0; i
< num_events
; i
++, opcode
++)
319 put_unaligned_le16(mgmt_events
[i
], opcode
);
321 __le16
*opcode
= rp
->opcodes
;
323 for (i
= 0; i
< num_commands
; i
++, opcode
++)
324 put_unaligned_le16(mgmt_untrusted_commands
[i
], opcode
);
326 for (i
= 0; i
< num_events
; i
++, opcode
++)
327 put_unaligned_le16(mgmt_untrusted_events
[i
], opcode
);
330 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0,
337 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
340 struct mgmt_rp_read_index_list
*rp
;
346 BT_DBG("sock %p", sk
);
348 read_lock(&hci_dev_list_lock
);
351 list_for_each_entry(d
, &hci_dev_list
, list
) {
352 if (d
->dev_type
== HCI_BREDR
&&
353 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
357 rp_len
= sizeof(*rp
) + (2 * count
);
358 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
360 read_unlock(&hci_dev_list_lock
);
365 list_for_each_entry(d
, &hci_dev_list
, list
) {
366 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
367 hci_dev_test_flag(d
, HCI_CONFIG
) ||
368 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
377 if (d
->dev_type
== HCI_BREDR
&&
378 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
379 rp
->index
[count
++] = cpu_to_le16(d
->id
);
380 BT_DBG("Added hci%u", d
->id
);
384 rp
->num_controllers
= cpu_to_le16(count
);
385 rp_len
= sizeof(*rp
) + (2 * count
);
387 read_unlock(&hci_dev_list_lock
);
389 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
,
397 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
398 void *data
, u16 data_len
)
400 struct mgmt_rp_read_unconf_index_list
*rp
;
406 BT_DBG("sock %p", sk
);
408 read_lock(&hci_dev_list_lock
);
411 list_for_each_entry(d
, &hci_dev_list
, list
) {
412 if (d
->dev_type
== HCI_BREDR
&&
413 hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
417 rp_len
= sizeof(*rp
) + (2 * count
);
418 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
420 read_unlock(&hci_dev_list_lock
);
425 list_for_each_entry(d
, &hci_dev_list
, list
) {
426 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
427 hci_dev_test_flag(d
, HCI_CONFIG
) ||
428 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
437 if (d
->dev_type
== HCI_BREDR
&&
438 hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
439 rp
->index
[count
++] = cpu_to_le16(d
->id
);
440 BT_DBG("Added hci%u", d
->id
);
444 rp
->num_controllers
= cpu_to_le16(count
);
445 rp_len
= sizeof(*rp
) + (2 * count
);
447 read_unlock(&hci_dev_list_lock
);
449 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
450 MGMT_OP_READ_UNCONF_INDEX_LIST
, 0, rp
, rp_len
);
457 static int read_ext_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
458 void *data
, u16 data_len
)
460 struct mgmt_rp_read_ext_index_list
*rp
;
466 BT_DBG("sock %p", sk
);
468 read_lock(&hci_dev_list_lock
);
471 list_for_each_entry(d
, &hci_dev_list
, list
) {
472 if (d
->dev_type
== HCI_BREDR
|| d
->dev_type
== HCI_AMP
)
476 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
477 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
479 read_unlock(&hci_dev_list_lock
);
484 list_for_each_entry(d
, &hci_dev_list
, list
) {
485 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
486 hci_dev_test_flag(d
, HCI_CONFIG
) ||
487 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
496 if (d
->dev_type
== HCI_BREDR
) {
497 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
498 rp
->entry
[count
].type
= 0x01;
500 rp
->entry
[count
].type
= 0x00;
501 } else if (d
->dev_type
== HCI_AMP
) {
502 rp
->entry
[count
].type
= 0x02;
507 rp
->entry
[count
].bus
= d
->bus
;
508 rp
->entry
[count
++].index
= cpu_to_le16(d
->id
);
509 BT_DBG("Added hci%u", d
->id
);
512 rp
->num_controllers
= cpu_to_le16(count
);
513 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
515 read_unlock(&hci_dev_list_lock
);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INDEX_EVENTS
);
522 hci_sock_clear_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
523 hci_sock_clear_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
525 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
526 MGMT_OP_READ_EXT_INDEX_LIST
, 0, rp
, rp_len
);
533 static bool is_configured(struct hci_dev
*hdev
)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
536 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
540 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
546 static __le32
get_missing_options(struct hci_dev
*hdev
)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
551 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
552 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
555 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
556 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
558 return cpu_to_le32(options
);
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current set of missing
 * configuration options for @hdev, skipping socket @skip. The options
 * value is already little-endian (see get_missing_options()).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
}
569 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
571 __le32 options
= get_missing_options(hdev
);
573 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
577 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
578 void *data
, u16 data_len
)
580 struct mgmt_rp_read_config_info rp
;
583 BT_DBG("sock %p %s", sk
, hdev
->name
);
587 memset(&rp
, 0, sizeof(rp
));
588 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
591 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
593 if (hdev
->set_bdaddr
)
594 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
596 rp
.supported_options
= cpu_to_le32(options
);
597 rp
.missing_options
= get_missing_options(hdev
);
599 hci_dev_unlock(hdev
);
601 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0,
605 static u32
get_supported_settings(struct hci_dev
*hdev
)
609 settings
|= MGMT_SETTING_POWERED
;
610 settings
|= MGMT_SETTING_BONDABLE
;
611 settings
|= MGMT_SETTING_DEBUG_KEYS
;
612 settings
|= MGMT_SETTING_CONNECTABLE
;
613 settings
|= MGMT_SETTING_DISCOVERABLE
;
615 if (lmp_bredr_capable(hdev
)) {
616 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
617 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
618 settings
|= MGMT_SETTING_BREDR
;
619 settings
|= MGMT_SETTING_LINK_SECURITY
;
621 if (lmp_ssp_capable(hdev
)) {
622 settings
|= MGMT_SETTING_SSP
;
623 settings
|= MGMT_SETTING_HS
;
626 if (lmp_sc_capable(hdev
))
627 settings
|= MGMT_SETTING_SECURE_CONN
;
630 if (lmp_le_capable(hdev
)) {
631 settings
|= MGMT_SETTING_LE
;
632 settings
|= MGMT_SETTING_ADVERTISING
;
633 settings
|= MGMT_SETTING_SECURE_CONN
;
634 settings
|= MGMT_SETTING_PRIVACY
;
635 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
640 settings
|= MGMT_SETTING_CONFIGURATION
;
645 static u32
get_current_settings(struct hci_dev
*hdev
)
649 if (hdev_is_powered(hdev
))
650 settings
|= MGMT_SETTING_POWERED
;
652 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
653 settings
|= MGMT_SETTING_CONNECTABLE
;
655 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
656 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
658 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
659 settings
|= MGMT_SETTING_DISCOVERABLE
;
661 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
))
662 settings
|= MGMT_SETTING_BONDABLE
;
664 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
665 settings
|= MGMT_SETTING_BREDR
;
667 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
668 settings
|= MGMT_SETTING_LE
;
670 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
))
671 settings
|= MGMT_SETTING_LINK_SECURITY
;
673 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
674 settings
|= MGMT_SETTING_SSP
;
676 if (hci_dev_test_flag(hdev
, HCI_HS_ENABLED
))
677 settings
|= MGMT_SETTING_HS
;
679 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
680 settings
|= MGMT_SETTING_ADVERTISING
;
682 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))
683 settings
|= MGMT_SETTING_SECURE_CONN
;
685 if (hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
))
686 settings
|= MGMT_SETTING_DEBUG_KEYS
;
688 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
))
689 settings
|= MGMT_SETTING_PRIVACY
;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
704 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
705 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
706 if (bacmp(&hdev
->static_addr
, BDADDR_ANY
))
707 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
717 u8
*ptr
= data
, *uuids_start
= NULL
;
718 struct bt_uuid
*uuid
;
723 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
726 if (uuid
->size
!= 16)
729 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
733 if (uuid16
== PNP_INFO_SVCLASS_ID
)
739 uuids_start
[1] = EIR_UUID16_ALL
;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr
- data
) + sizeof(u16
) > len
) {
745 uuids_start
[1] = EIR_UUID16_SOME
;
749 *ptr
++ = (uuid16
& 0x00ff);
750 *ptr
++ = (uuid16
& 0xff00) >> 8;
751 uuids_start
[0] += sizeof(uuid16
);
757 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
759 u8
*ptr
= data
, *uuids_start
= NULL
;
760 struct bt_uuid
*uuid
;
765 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
766 if (uuid
->size
!= 32)
772 uuids_start
[1] = EIR_UUID32_ALL
;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr
- data
) + sizeof(u32
) > len
) {
778 uuids_start
[1] = EIR_UUID32_SOME
;
782 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
784 uuids_start
[0] += sizeof(u32
);
790 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
792 u8
*ptr
= data
, *uuids_start
= NULL
;
793 struct bt_uuid
*uuid
;
798 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
799 if (uuid
->size
!= 128)
805 uuids_start
[1] = EIR_UUID128_ALL
;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr
- data
) + 16 > len
) {
811 uuids_start
[1] = EIR_UUID128_SOME
;
815 memcpy(ptr
, uuid
->uuid
, 16);
817 uuids_start
[0] += 16;
/* Look up a pending management command for @opcode on @hdev, limited
 * to commands issued over the HCI control channel.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
828 static struct mgmt_pending_cmd
*pending_find_data(u16 opcode
,
829 struct hci_dev
*hdev
,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL
, opcode
, hdev
, data
);
835 static u8
create_default_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
840 name_len
= strlen(hdev
->dev_name
);
842 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
844 if (name_len
> max_len
) {
846 ptr
[1] = EIR_NAME_SHORT
;
848 ptr
[1] = EIR_NAME_COMPLETE
;
850 ptr
[0] = name_len
+ 1;
852 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
854 ad_len
+= (name_len
+ 2);
855 ptr
+= (name_len
+ 2);
861 static u8
create_instance_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
866 memcpy(ptr
, hdev
->adv_instance
.scan_rsp_data
,
867 hdev
->adv_instance
.scan_rsp_len
);
869 return hdev
->adv_instance
.scan_rsp_len
;
872 static void update_scan_rsp_data_for_instance(struct hci_request
*req
,
875 struct hci_dev
*hdev
= req
->hdev
;
876 struct hci_cp_le_set_scan_rsp_data cp
;
879 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
882 memset(&cp
, 0, sizeof(cp
));
885 len
= create_instance_scan_rsp_data(hdev
, cp
.data
);
887 len
= create_default_scan_rsp_data(hdev
, cp
.data
);
889 if (hdev
->scan_rsp_data_len
== len
&&
890 !memcmp(cp
.data
, hdev
->scan_rsp_data
, len
))
893 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
894 hdev
->scan_rsp_data_len
= len
;
898 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
901 static void update_scan_rsp_data(struct hci_request
*req
)
903 struct hci_dev
*hdev
= req
->hdev
;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
911 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) &&
912 !hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
917 update_scan_rsp_data_for_instance(req
, instance
);
920 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
922 struct mgmt_pending_cmd
*cmd
;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
927 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
929 struct mgmt_mode
*cp
= cmd
->param
;
931 return LE_AD_GENERAL
;
932 else if (cp
->val
== 0x02)
933 return LE_AD_LIMITED
;
935 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
936 return LE_AD_LIMITED
;
937 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
938 return LE_AD_GENERAL
;
944 static u8
get_current_adv_instance(struct hci_dev
*hdev
)
946 /* The "Set Advertising" setting supersedes the "Add Advertising"
947 * setting. Here we set the advertising data based on which
948 * setting was set. When neither apply, default to the global settings,
949 * represented by instance "0".
951 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) &&
952 !hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
958 static bool get_connectable(struct hci_dev
*hdev
)
960 struct mgmt_pending_cmd
*cmd
;
962 /* If there's a pending mgmt command the flag will not yet have
963 * it's final value, so check for this first.
965 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
967 struct mgmt_mode
*cp
= cmd
->param
;
972 return hci_dev_test_flag(hdev
, HCI_CONNECTABLE
);
975 static u32
get_adv_instance_flags(struct hci_dev
*hdev
, u8 instance
)
982 if (instance
== 0x01)
983 return hdev
->adv_instance
.flags
;
985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 flags
= MGMT_ADV_FLAG_TX_POWER
| MGMT_ADV_FLAG_MANAGED_FLAGS
;
988 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 * to the "connectable" instance flag.
991 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
))
992 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
997 static u8
get_adv_instance_scan_rsp_len(struct hci_dev
*hdev
, u8 instance
)
999 /* Ignore instance 0 and other unsupported instances */
1000 if (instance
!= 0x01)
1003 /* TODO: Take into account the "appearance" and "local-name" flags here.
1004 * These are currently being ignored as they are not supported.
1006 return hdev
->adv_instance
.scan_rsp_len
;
1009 static u8
create_instance_adv_data(struct hci_dev
*hdev
, u8 instance
, u8
*ptr
)
1011 u8 ad_len
= 0, flags
= 0;
1012 u32 instance_flags
= get_adv_instance_flags(hdev
, instance
);
1014 /* The Add Advertising command allows userspace to set both the general
1015 * and limited discoverable flags.
1017 if (instance_flags
& MGMT_ADV_FLAG_DISCOV
)
1018 flags
|= LE_AD_GENERAL
;
1020 if (instance_flags
& MGMT_ADV_FLAG_LIMITED_DISCOV
)
1021 flags
|= LE_AD_LIMITED
;
1023 if (flags
|| (instance_flags
& MGMT_ADV_FLAG_MANAGED_FLAGS
)) {
1024 /* If a discovery flag wasn't provided, simply use the global
1028 flags
|= get_adv_discov_flags(hdev
);
1030 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1031 flags
|= LE_AD_NO_BREDR
;
1033 /* If flags would still be empty, then there is no need to
1034 * include the "Flags" AD field".
1047 memcpy(ptr
, hdev
->adv_instance
.adv_data
,
1048 hdev
->adv_instance
.adv_data_len
);
1050 ad_len
+= hdev
->adv_instance
.adv_data_len
;
1051 ptr
+= hdev
->adv_instance
.adv_data_len
;
1054 /* Provide Tx Power only if we can provide a valid value for it */
1055 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
&&
1056 (instance_flags
& MGMT_ADV_FLAG_TX_POWER
)) {
1058 ptr
[1] = EIR_TX_POWER
;
1059 ptr
[2] = (u8
)hdev
->adv_tx_power
;
1068 static void update_adv_data_for_instance(struct hci_request
*req
, u8 instance
)
1070 struct hci_dev
*hdev
= req
->hdev
;
1071 struct hci_cp_le_set_adv_data cp
;
1074 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1077 memset(&cp
, 0, sizeof(cp
));
1079 len
= create_instance_adv_data(hdev
, instance
, cp
.data
);
1081 /* There's nothing to do if the data hasn't changed */
1082 if (hdev
->adv_data_len
== len
&&
1083 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
1086 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
1087 hdev
->adv_data_len
= len
;
1091 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
1094 static void update_adv_data(struct hci_request
*req
)
1096 struct hci_dev
*hdev
= req
->hdev
;
1097 u8 instance
= get_current_adv_instance(hdev
);
1099 update_adv_data_for_instance(req
, instance
);
/* Build and run a standalone HCI request that refreshes the LE
 * advertising data for @hdev. Returns the result of hci_req_run().
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
1112 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
1117 name_len
= strlen(hdev
->dev_name
);
1121 if (name_len
> 48) {
1123 ptr
[1] = EIR_NAME_SHORT
;
1125 ptr
[1] = EIR_NAME_COMPLETE
;
1127 /* EIR Data length */
1128 ptr
[0] = name_len
+ 1;
1130 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
1132 ptr
+= (name_len
+ 2);
1135 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
1137 ptr
[1] = EIR_TX_POWER
;
1138 ptr
[2] = (u8
) hdev
->inq_tx_power
;
1143 if (hdev
->devid_source
> 0) {
1145 ptr
[1] = EIR_DEVICE_ID
;
1147 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
1148 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
1149 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
1150 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
1155 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
1156 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
1157 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
1160 static void update_eir(struct hci_request
*req
)
1162 struct hci_dev
*hdev
= req
->hdev
;
1163 struct hci_cp_write_eir cp
;
1165 if (!hdev_is_powered(hdev
))
1168 if (!lmp_ext_inq_capable(hdev
))
1171 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
1174 if (hci_dev_test_flag(hdev
, HCI_SERVICE_CACHE
))
1177 memset(&cp
, 0, sizeof(cp
));
1179 create_eir(hdev
, cp
.data
);
1181 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
1184 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
1186 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
1189 static u8
get_service_classes(struct hci_dev
*hdev
)
1191 struct bt_uuid
*uuid
;
1194 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
1195 val
|= uuid
->svc_hint
;
1200 static void update_class(struct hci_request
*req
)
1202 struct hci_dev
*hdev
= req
->hdev
;
1205 BT_DBG("%s", hdev
->name
);
1207 if (!hdev_is_powered(hdev
))
1210 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1213 if (hci_dev_test_flag(hdev
, HCI_SERVICE_CACHE
))
1216 cod
[0] = hdev
->minor_class
;
1217 cod
[1] = hdev
->major_class
;
1218 cod
[2] = get_service_classes(hdev
);
1220 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
1223 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
1226 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
1229 static void disable_advertising(struct hci_request
*req
)
1233 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1236 static void enable_advertising(struct hci_request
*req
)
1238 struct hci_dev
*hdev
= req
->hdev
;
1239 struct hci_cp_le_set_adv_param cp
;
1240 u8 own_addr_type
, enable
= 0x01;
1245 if (hci_conn_num(hdev
, LE_LINK
) > 0)
1248 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1249 disable_advertising(req
);
1251 /* Clear the HCI_LE_ADV bit temporarily so that the
1252 * hci_update_random_address knows that it's safe to go ahead
1253 * and write a new random address. The flag will be set back on
1254 * as soon as the SET_ADV_ENABLE HCI command completes.
1256 hci_dev_clear_flag(hdev
, HCI_LE_ADV
);
1258 instance
= get_current_adv_instance(hdev
);
1259 flags
= get_adv_instance_flags(hdev
, instance
);
1261 /* If the "connectable" instance flag was not set, then choose between
1262 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1264 connectable
= (flags
& MGMT_ADV_FLAG_CONNECTABLE
) ||
1265 get_connectable(hdev
);
1267 /* Set require_privacy to true only when non-connectable
1268 * advertising is used. In that case it is fine to use a
1269 * non-resolvable private address.
1271 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
1274 memset(&cp
, 0, sizeof(cp
));
1275 cp
.min_interval
= cpu_to_le16(hdev
->le_adv_min_interval
);
1276 cp
.max_interval
= cpu_to_le16(hdev
->le_adv_max_interval
);
1279 cp
.type
= LE_ADV_IND
;
1280 else if (get_adv_instance_scan_rsp_len(hdev
, instance
))
1281 cp
.type
= LE_ADV_SCAN_IND
;
1283 cp
.type
= LE_ADV_NONCONN_IND
;
1285 cp
.own_address_type
= own_addr_type
;
1286 cp
.channel_map
= hdev
->le_adv_channel_map
;
1288 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
1290 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1293 static void service_cache_off(struct work_struct
*work
)
1295 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1296 service_cache
.work
);
1297 struct hci_request req
;
1299 if (!hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
1302 hci_req_init(&req
, hdev
);
1309 hci_dev_unlock(hdev
);
1311 hci_req_run(&req
, NULL
);
1314 static void rpa_expired(struct work_struct
*work
)
1316 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1318 struct hci_request req
;
1322 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
1324 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
1327 /* The generation of a new RPA and programming it into the
1328 * controller happens in the enable_advertising() function.
1330 hci_req_init(&req
, hdev
);
1331 enable_advertising(&req
);
1332 hci_req_run(&req
, NULL
);
1335 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
1337 if (hci_dev_test_and_set_flag(hdev
, HCI_MGMT
))
1340 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
1341 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1343 /* Non-mgmt controlled devices get this bit set
1344 * implicitly so that pairing works for them, however
1345 * for mgmt we require user-space to explicitly enable
1348 hci_dev_clear_flag(hdev
, HCI_BONDABLE
);
1351 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1352 void *data
, u16 data_len
)
1354 struct mgmt_rp_read_info rp
;
1356 BT_DBG("sock %p %s", sk
, hdev
->name
);
1360 memset(&rp
, 0, sizeof(rp
));
1362 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1364 rp
.version
= hdev
->hci_ver
;
1365 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1367 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1368 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1370 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1372 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1373 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1375 hci_dev_unlock(hdev
);
1377 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1381 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1383 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1385 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1389 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1391 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1393 if (hci_conn_count(hdev
) == 0) {
1394 cancel_delayed_work(&hdev
->power_off
);
1395 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1399 static bool hci_stop_discovery(struct hci_request
*req
)
1401 struct hci_dev
*hdev
= req
->hdev
;
1402 struct hci_cp_remote_name_req_cancel cp
;
1403 struct inquiry_entry
*e
;
1405 switch (hdev
->discovery
.state
) {
1406 case DISCOVERY_FINDING
:
1407 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
1408 hci_req_add(req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
1410 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
)) {
1411 cancel_delayed_work(&hdev
->le_scan_disable
);
1412 hci_req_add_le_scan_disable(req
);
1417 case DISCOVERY_RESOLVING
:
1418 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
1423 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
1424 hci_req_add(req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
1430 /* Passive scanning */
1431 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
)) {
1432 hci_req_add_le_scan_disable(req
);
1442 static void advertising_added(struct sock
*sk
, struct hci_dev
*hdev
,
1445 struct mgmt_ev_advertising_added ev
;
1447 ev
.instance
= instance
;
1449 mgmt_event(MGMT_EV_ADVERTISING_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
1452 static void advertising_removed(struct sock
*sk
, struct hci_dev
*hdev
,
1455 struct mgmt_ev_advertising_removed ev
;
1457 ev
.instance
= instance
;
1459 mgmt_event(MGMT_EV_ADVERTISING_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
1462 static void clear_adv_instance(struct hci_dev
*hdev
)
1464 struct hci_request req
;
1466 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
1469 if (hdev
->adv_instance
.timeout
)
1470 cancel_delayed_work(&hdev
->adv_instance
.timeout_exp
);
1472 memset(&hdev
->adv_instance
, 0, sizeof(hdev
->adv_instance
));
1473 advertising_removed(NULL
, hdev
, 1);
1474 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
1476 if (!hdev_is_powered(hdev
) ||
1477 hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
1480 hci_req_init(&req
, hdev
);
1481 disable_advertising(&req
);
1482 hci_req_run(&req
, NULL
);
1485 static int clean_up_hci_state(struct hci_dev
*hdev
)
1487 struct hci_request req
;
1488 struct hci_conn
*conn
;
1489 bool discov_stopped
;
1492 hci_req_init(&req
, hdev
);
1494 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1495 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1497 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1500 if (hdev
->adv_instance
.timeout
)
1501 clear_adv_instance(hdev
);
1503 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1504 disable_advertising(&req
);
1506 discov_stopped
= hci_stop_discovery(&req
);
1508 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1509 struct hci_cp_disconnect dc
;
1510 struct hci_cp_reject_conn_req rej
;
1512 switch (conn
->state
) {
1515 dc
.handle
= cpu_to_le16(conn
->handle
);
1516 dc
.reason
= 0x15; /* Terminated due to Power Off */
1517 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1520 if (conn
->type
== LE_LINK
)
1521 hci_req_add(&req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
1523 else if (conn
->type
== ACL_LINK
)
1524 hci_req_add(&req
, HCI_OP_CREATE_CONN_CANCEL
,
1528 bacpy(&rej
.bdaddr
, &conn
->dst
);
1529 rej
.reason
= 0x15; /* Terminated due to Power Off */
1530 if (conn
->type
== ACL_LINK
)
1531 hci_req_add(&req
, HCI_OP_REJECT_CONN_REQ
,
1533 else if (conn
->type
== SCO_LINK
)
1534 hci_req_add(&req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
1540 err
= hci_req_run(&req
, clean_up_hci_complete
);
1541 if (!err
&& discov_stopped
)
1542 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1547 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1550 struct mgmt_mode
*cp
= data
;
1551 struct mgmt_pending_cmd
*cmd
;
1554 BT_DBG("request for %s", hdev
->name
);
1556 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1557 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1558 MGMT_STATUS_INVALID_PARAMS
);
1562 if (pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1563 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1568 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
)) {
1569 cancel_delayed_work(&hdev
->power_off
);
1572 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1574 err
= mgmt_powered(hdev
, 1);
1579 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1580 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1584 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1591 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1594 /* Disconnect connections, stop scans, etc */
1595 err
= clean_up_hci_state(hdev
);
1597 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1598 HCI_POWER_OFF_TIMEOUT
);
1600 /* ENODATA means there were no HCI commands queued */
1601 if (err
== -ENODATA
) {
1602 cancel_delayed_work(&hdev
->power_off
);
1603 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1609 hci_dev_unlock(hdev
);
1613 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1615 __le32 ev
= cpu_to_le32(get_current_settings(hdev
));
1617 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
,
1621 int mgmt_new_settings(struct hci_dev
*hdev
)
1623 return new_settings(hdev
, NULL
);
1628 struct hci_dev
*hdev
;
1632 static void settings_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1634 struct cmd_lookup
*match
= data
;
1636 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1638 list_del(&cmd
->list
);
1640 if (match
->sk
== NULL
) {
1641 match
->sk
= cmd
->sk
;
1642 sock_hold(match
->sk
);
1645 mgmt_pending_free(cmd
);
1648 static void cmd_status_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1652 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1653 mgmt_pending_remove(cmd
);
1656 static void cmd_complete_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1658 if (cmd
->cmd_complete
) {
1661 cmd
->cmd_complete(cmd
, *status
);
1662 mgmt_pending_remove(cmd
);
1667 cmd_status_rsp(cmd
, data
);
1670 static int generic_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1672 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1673 cmd
->param
, cmd
->param_len
);
1676 static int addr_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1678 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1679 cmd
->param
, sizeof(struct mgmt_addr_info
));
1682 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1684 if (!lmp_bredr_capable(hdev
))
1685 return MGMT_STATUS_NOT_SUPPORTED
;
1686 else if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1687 return MGMT_STATUS_REJECTED
;
1689 return MGMT_STATUS_SUCCESS
;
1692 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1694 if (!lmp_le_capable(hdev
))
1695 return MGMT_STATUS_NOT_SUPPORTED
;
1696 else if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1697 return MGMT_STATUS_REJECTED
;
1699 return MGMT_STATUS_SUCCESS
;
1702 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
,
1705 struct mgmt_pending_cmd
*cmd
;
1706 struct mgmt_mode
*cp
;
1707 struct hci_request req
;
1710 BT_DBG("status 0x%02x", status
);
1714 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1719 u8 mgmt_err
= mgmt_status(status
);
1720 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1721 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1727 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_DISCOVERABLE
);
1729 if (hdev
->discov_timeout
> 0) {
1730 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1731 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1735 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_DISCOVERABLE
);
1738 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1741 new_settings(hdev
, cmd
->sk
);
1743 /* When the discoverable mode gets changed, make sure
1744 * that class of device has the limited discoverable
1745 * bit correctly set. Also update page scan based on whitelist
1748 hci_req_init(&req
, hdev
);
1749 __hci_update_page_scan(&req
);
1751 hci_req_run(&req
, NULL
);
1754 mgmt_pending_remove(cmd
);
1757 hci_dev_unlock(hdev
);
1760 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1763 struct mgmt_cp_set_discoverable
*cp
= data
;
1764 struct mgmt_pending_cmd
*cmd
;
1765 struct hci_request req
;
1770 BT_DBG("request for %s", hdev
->name
);
1772 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1773 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1774 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1775 MGMT_STATUS_REJECTED
);
1777 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1778 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1779 MGMT_STATUS_INVALID_PARAMS
);
1781 timeout
= __le16_to_cpu(cp
->timeout
);
1783 /* Disabling discoverable requires that no timeout is set,
1784 * and enabling limited discoverable requires a timeout.
1786 if ((cp
->val
== 0x00 && timeout
> 0) ||
1787 (cp
->val
== 0x02 && timeout
== 0))
1788 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1789 MGMT_STATUS_INVALID_PARAMS
);
1793 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1794 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1795 MGMT_STATUS_NOT_POWERED
);
1799 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1800 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1801 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1806 if (!hci_dev_test_flag(hdev
, HCI_CONNECTABLE
)) {
1807 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1808 MGMT_STATUS_REJECTED
);
1812 if (!hdev_is_powered(hdev
)) {
1813 bool changed
= false;
1815 /* Setting limited discoverable when powered off is
1816 * not a valid operation since it requires a timeout
1817 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1819 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
)) {
1820 hci_dev_change_flag(hdev
, HCI_DISCOVERABLE
);
1824 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1829 err
= new_settings(hdev
, sk
);
1834 /* If the current mode is the same, then just update the timeout
1835 * value with the new value. And if only the timeout gets updated,
1836 * then no need for any HCI transactions.
1838 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1839 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
,
1840 HCI_LIMITED_DISCOVERABLE
)) {
1841 cancel_delayed_work(&hdev
->discov_off
);
1842 hdev
->discov_timeout
= timeout
;
1844 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1845 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1846 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1850 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1854 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1860 /* Cancel any potential discoverable timeout that might be
1861 * still active and store new timeout value. The arming of
1862 * the timeout happens in the complete handler.
1864 cancel_delayed_work(&hdev
->discov_off
);
1865 hdev
->discov_timeout
= timeout
;
1867 /* Limited discoverable mode */
1868 if (cp
->val
== 0x02)
1869 hci_dev_set_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1871 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1873 hci_req_init(&req
, hdev
);
1875 /* The procedure for LE-only controllers is much simpler - just
1876 * update the advertising data.
1878 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1884 struct hci_cp_write_current_iac_lap hci_cp
;
1886 if (cp
->val
== 0x02) {
1887 /* Limited discoverable mode */
1888 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1889 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1890 hci_cp
.iac_lap
[1] = 0x8b;
1891 hci_cp
.iac_lap
[2] = 0x9e;
1892 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1893 hci_cp
.iac_lap
[4] = 0x8b;
1894 hci_cp
.iac_lap
[5] = 0x9e;
1896 /* General discoverable mode */
1898 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1899 hci_cp
.iac_lap
[1] = 0x8b;
1900 hci_cp
.iac_lap
[2] = 0x9e;
1903 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1904 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1906 scan
|= SCAN_INQUIRY
;
1908 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1911 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1914 update_adv_data(&req
);
1916 err
= hci_req_run(&req
, set_discoverable_complete
);
1918 mgmt_pending_remove(cmd
);
1921 hci_dev_unlock(hdev
);
1925 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1927 struct hci_dev
*hdev
= req
->hdev
;
1928 struct hci_cp_write_page_scan_activity acp
;
1931 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1934 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1938 type
= PAGE_SCAN_TYPE_INTERLACED
;
1940 /* 160 msec page scan interval */
1941 acp
.interval
= cpu_to_le16(0x0100);
1943 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1945 /* default 1.28 sec page scan */
1946 acp
.interval
= cpu_to_le16(0x0800);
1949 acp
.window
= cpu_to_le16(0x0012);
1951 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1952 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1953 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1956 if (hdev
->page_scan_type
!= type
)
1957 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1960 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
,
1963 struct mgmt_pending_cmd
*cmd
;
1964 struct mgmt_mode
*cp
;
1965 bool conn_changed
, discov_changed
;
1967 BT_DBG("status 0x%02x", status
);
1971 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1976 u8 mgmt_err
= mgmt_status(status
);
1977 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1983 conn_changed
= !hci_dev_test_and_set_flag(hdev
,
1985 discov_changed
= false;
1987 conn_changed
= hci_dev_test_and_clear_flag(hdev
,
1989 discov_changed
= hci_dev_test_and_clear_flag(hdev
,
1993 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1995 if (conn_changed
|| discov_changed
) {
1996 new_settings(hdev
, cmd
->sk
);
1997 hci_update_page_scan(hdev
);
1999 mgmt_update_adv_data(hdev
);
2000 hci_update_background_scan(hdev
);
2004 mgmt_pending_remove(cmd
);
2007 hci_dev_unlock(hdev
);
2010 static int set_connectable_update_settings(struct hci_dev
*hdev
,
2011 struct sock
*sk
, u8 val
)
2013 bool changed
= false;
2016 if (!!val
!= hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
2020 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
2022 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
2023 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
2026 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
2031 hci_update_page_scan(hdev
);
2032 hci_update_background_scan(hdev
);
2033 return new_settings(hdev
, sk
);
2039 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2042 struct mgmt_mode
*cp
= data
;
2043 struct mgmt_pending_cmd
*cmd
;
2044 struct hci_request req
;
2048 BT_DBG("request for %s", hdev
->name
);
2050 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
2051 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
2052 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
2053 MGMT_STATUS_REJECTED
);
2055 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2056 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
2057 MGMT_STATUS_INVALID_PARAMS
);
2061 if (!hdev_is_powered(hdev
)) {
2062 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
2066 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
2067 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
2068 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
2073 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
2079 hci_req_init(&req
, hdev
);
2081 /* If BR/EDR is not enabled and we disable advertising as a
2082 * by-product of disabling connectable, we need to update the
2083 * advertising flags.
2085 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
2087 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
2088 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
2090 update_adv_data(&req
);
2091 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
2095 /* If we don't have any whitelist entries just
2096 * disable all scanning. If there are entries
2097 * and we had both page and inquiry scanning
2098 * enabled then fall back to only page scanning.
2099 * Otherwise no changes are needed.
2101 if (list_empty(&hdev
->whitelist
))
2102 scan
= SCAN_DISABLED
;
2103 else if (test_bit(HCI_ISCAN
, &hdev
->flags
))
2106 goto no_scan_update
;
2108 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
2109 hdev
->discov_timeout
> 0)
2110 cancel_delayed_work(&hdev
->discov_off
);
2113 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
2117 /* Update the advertising parameters if necessary */
2118 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
2119 hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
2120 enable_advertising(&req
);
2122 err
= hci_req_run(&req
, set_connectable_complete
);
2124 mgmt_pending_remove(cmd
);
2125 if (err
== -ENODATA
)
2126 err
= set_connectable_update_settings(hdev
, sk
,
2132 hci_dev_unlock(hdev
);
2136 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2139 struct mgmt_mode
*cp
= data
;
2143 BT_DBG("request for %s", hdev
->name
);
2145 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2146 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
2147 MGMT_STATUS_INVALID_PARAMS
);
2152 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_BONDABLE
);
2154 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_BONDABLE
);
2156 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
2161 err
= new_settings(hdev
, sk
);
2164 hci_dev_unlock(hdev
);
2168 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2171 struct mgmt_mode
*cp
= data
;
2172 struct mgmt_pending_cmd
*cmd
;
2176 BT_DBG("request for %s", hdev
->name
);
2178 status
= mgmt_bredr_support(hdev
);
2180 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2183 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2184 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2185 MGMT_STATUS_INVALID_PARAMS
);
2189 if (!hdev_is_powered(hdev
)) {
2190 bool changed
= false;
2192 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
2193 hci_dev_change_flag(hdev
, HCI_LINK_SECURITY
);
2197 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2202 err
= new_settings(hdev
, sk
);
2207 if (pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
2208 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2215 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
2216 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2220 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
2226 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
2228 mgmt_pending_remove(cmd
);
2233 hci_dev_unlock(hdev
);
2237 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2239 struct mgmt_mode
*cp
= data
;
2240 struct mgmt_pending_cmd
*cmd
;
2244 BT_DBG("request for %s", hdev
->name
);
2246 status
= mgmt_bredr_support(hdev
);
2248 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
2250 if (!lmp_ssp_capable(hdev
))
2251 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2252 MGMT_STATUS_NOT_SUPPORTED
);
2254 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2255 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2256 MGMT_STATUS_INVALID_PARAMS
);
2260 if (!hdev_is_powered(hdev
)) {
2264 changed
= !hci_dev_test_and_set_flag(hdev
,
2267 changed
= hci_dev_test_and_clear_flag(hdev
,
2270 changed
= hci_dev_test_and_clear_flag(hdev
,
2273 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
2276 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2281 err
= new_settings(hdev
, sk
);
2286 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
2287 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2292 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
2293 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2297 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
2303 if (!cp
->val
&& hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
2304 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
2305 sizeof(cp
->val
), &cp
->val
);
2307 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
2309 mgmt_pending_remove(cmd
);
2314 hci_dev_unlock(hdev
);
2318 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2320 struct mgmt_mode
*cp
= data
;
2325 BT_DBG("request for %s", hdev
->name
);
2327 status
= mgmt_bredr_support(hdev
);
2329 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
2331 if (!lmp_ssp_capable(hdev
))
2332 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2333 MGMT_STATUS_NOT_SUPPORTED
);
2335 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
2336 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2337 MGMT_STATUS_REJECTED
);
2339 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2340 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2341 MGMT_STATUS_INVALID_PARAMS
);
2345 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
2346 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2352 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_HS_ENABLED
);
2354 if (hdev_is_powered(hdev
)) {
2355 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2356 MGMT_STATUS_REJECTED
);
2360 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_HS_ENABLED
);
2363 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
2368 err
= new_settings(hdev
, sk
);
2371 hci_dev_unlock(hdev
);
2375 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2377 struct cmd_lookup match
= { NULL
, hdev
};
2382 u8 mgmt_err
= mgmt_status(status
);
2384 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2389 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2391 new_settings(hdev
, match
.sk
);
2396 /* Make sure the controller has a good default for
2397 * advertising data. Restrict the update to when LE
2398 * has actually been enabled. During power on, the
2399 * update in powered_update_hci will take care of it.
2401 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2402 struct hci_request req
;
2404 hci_req_init(&req
, hdev
);
2405 update_adv_data(&req
);
2406 update_scan_rsp_data(&req
);
2407 __hci_update_background_scan(&req
);
2408 hci_req_run(&req
, NULL
);
2412 hci_dev_unlock(hdev
);
2415 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2417 struct mgmt_mode
*cp
= data
;
2418 struct hci_cp_write_le_host_supported hci_cp
;
2419 struct mgmt_pending_cmd
*cmd
;
2420 struct hci_request req
;
2424 BT_DBG("request for %s", hdev
->name
);
2426 if (!lmp_le_capable(hdev
))
2427 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2428 MGMT_STATUS_NOT_SUPPORTED
);
2430 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2431 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2432 MGMT_STATUS_INVALID_PARAMS
);
2434 /* Bluetooth single mode LE only controllers or dual-mode
2435 * controllers configured as LE only devices, do not allow
2436 * switching LE off. These have either LE enabled explicitly
2437 * or BR/EDR has been previously switched off.
2439 * When trying to enable an already enabled LE, then gracefully
2440 * send a positive response. Trying to disable it however will
2441 * result into rejection.
2443 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
2444 if (cp
->val
== 0x01)
2445 return send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2447 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2448 MGMT_STATUS_REJECTED
);
2454 enabled
= lmp_host_le_capable(hdev
);
2456 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2457 bool changed
= false;
2459 if (val
!= hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2460 hci_dev_change_flag(hdev
, HCI_LE_ENABLED
);
2464 if (!val
&& hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
2465 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
2469 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2474 err
= new_settings(hdev
, sk
);
2479 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
2480 pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2481 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2486 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2492 hci_req_init(&req
, hdev
);
2494 memset(&hci_cp
, 0, sizeof(hci_cp
));
2498 hci_cp
.simul
= 0x00;
2500 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
2501 disable_advertising(&req
);
2504 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2507 err
= hci_req_run(&req
, le_enable_complete
);
2509 mgmt_pending_remove(cmd
);
2512 hci_dev_unlock(hdev
);
2516 /* This is a helper function to test for pending mgmt commands that can
2517 * cause CoD or EIR HCI commands. We can only allow one such pending
2518 * mgmt command at a time since otherwise we cannot easily track what
2519 * the current values are, will be, and based on that calculate if a new
2520 * HCI command needs to be sent and if yes with what value.
2522 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2524 struct mgmt_pending_cmd
*cmd
;
2526 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2527 switch (cmd
->opcode
) {
2528 case MGMT_OP_ADD_UUID
:
2529 case MGMT_OP_REMOVE_UUID
:
2530 case MGMT_OP_SET_DEV_CLASS
:
2531 case MGMT_OP_SET_POWERED
:
2539 static const u8 bluetooth_base_uuid
[] = {
2540 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2541 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2544 static u8
get_uuid_size(const u8
*uuid
)
2548 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2551 val
= get_unaligned_le32(&uuid
[12]);
2558 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2560 struct mgmt_pending_cmd
*cmd
;
2564 cmd
= pending_find(mgmt_op
, hdev
);
2568 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
2569 mgmt_status(status
), hdev
->dev_class
, 3);
2571 mgmt_pending_remove(cmd
);
2574 hci_dev_unlock(hdev
);
2577 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2579 BT_DBG("status 0x%02x", status
);
2581 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2584 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2586 struct mgmt_cp_add_uuid
*cp
= data
;
2587 struct mgmt_pending_cmd
*cmd
;
2588 struct hci_request req
;
2589 struct bt_uuid
*uuid
;
2592 BT_DBG("request for %s", hdev
->name
);
2596 if (pending_eir_or_class(hdev
)) {
2597 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2602 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2608 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2609 uuid
->svc_hint
= cp
->svc_hint
;
2610 uuid
->size
= get_uuid_size(cp
->uuid
);
2612 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2614 hci_req_init(&req
, hdev
);
2619 err
= hci_req_run(&req
, add_uuid_complete
);
2621 if (err
!= -ENODATA
)
2624 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2625 hdev
->dev_class
, 3);
2629 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2638 hci_dev_unlock(hdev
);
2642 static bool enable_service_cache(struct hci_dev
*hdev
)
2644 if (!hdev_is_powered(hdev
))
2647 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
2648 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2656 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2658 BT_DBG("status 0x%02x", status
);
2660 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2663 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2666 struct mgmt_cp_remove_uuid
*cp
= data
;
2667 struct mgmt_pending_cmd
*cmd
;
2668 struct bt_uuid
*match
, *tmp
;
2669 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2670 struct hci_request req
;
2673 BT_DBG("request for %s", hdev
->name
);
2677 if (pending_eir_or_class(hdev
)) {
2678 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2683 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2684 hci_uuids_clear(hdev
);
2686 if (enable_service_cache(hdev
)) {
2687 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2688 MGMT_OP_REMOVE_UUID
,
2689 0, hdev
->dev_class
, 3);
2698 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2699 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2702 list_del(&match
->list
);
2708 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2709 MGMT_STATUS_INVALID_PARAMS
);
2714 hci_req_init(&req
, hdev
);
2719 err
= hci_req_run(&req
, remove_uuid_complete
);
2721 if (err
!= -ENODATA
)
2724 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2725 hdev
->dev_class
, 3);
2729 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2738 hci_dev_unlock(hdev
);
2742 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2744 BT_DBG("status 0x%02x", status
);
2746 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
/* NOTE(review): incomplete excerpt — braces, goto/label lines and some
 * declarations are elided; visible tokens preserved.
 */
/* MGMT_OP_SET_DEV_CLASS handler: validates and stores the major/minor device
 * class and pushes it to the controller via an HCI request (BR/EDR only).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, /* … */

	/* Low two bits of minor and high three bits of major are reserved. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS); /* … */

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just remember the values; controller gets them later. */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3); /* … */

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Unlock before the synchronous cancel to avoid deadlocking
		 * against the service_cache work taking the same lock —
		 * presumably; the relocking line is elided, TODO confirm.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache); /* … */

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) { /* assumed from context — original condition lines elided */
		/* -ENODATA: no HCI commands were queued, reply immediately. */
		if (err != -ENODATA) /* … */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3); /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); /* … */

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines and some
 * declarations (err, i, changed, len parameter) are elided.
 */
/* MGMT_OP_LOAD_LINK_KEYS handler: validates and loads a userspace-provided
 * list of BR/EDR link keys, replacing the existing key store.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count so expected_len below cannot overflow u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u", /* … */
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS); /* … */

	/* Exact-length check: command must carry precisely key_count entries. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes", /* … */
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS); /* … */

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, /* … */

	/* Validate every entry before mutating any state. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS); /* … */

	hci_link_keys_clear(hdev);

	changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); /* … */
	changed = hci_dev_test_and_clear_flag(hdev,
					      HCI_KEEP_DEBUG_KEYS); /* … */

	new_settings(hdev, NULL); /* presumably only if "changed" — guard elided */

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION) /* … */

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL); /* … */

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces and final argument elided. */
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address, skipping the
 * socket that issued the unpair request.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev), /* … */
/* NOTE(review): incomplete excerpt — braces, goto/label lines, else keywords
 * and some declarations (err, addr_type, len parameter) are elided.
 */
/* MGMT_OP_UNPAIR_DEVICE handler: removes all stored keys (link key, or LTK,
 * IRK and connection params for LE) for a device and optionally disconnects
 * it via HCI_OP_DISCONNECT.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS, /* … */

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS, /* … */

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp, /* … */

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, /* … */

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr); /* … */

	/* LE branch — convert mgmt address type to HCI address type. */
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else /* assumed — else line elided */
		addr_type = ADDR_LE_DEV_RANDOM;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, /* … */

	/* Defer clearing up the connection parameters
	 * until closing to give a chance of keeping
	 * them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* If disconnection is not requested, then
	 * clear the connection variable so that the
	 * link is not terminated.
	 */
	if (!cp->disconnect) /* … */

	hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); /* … */

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);

	/* No key was stored for this address. */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				MGMT_STATUS_NOT_PAIRED, &rp, /* … */

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, /* … */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, /* … */

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines and some
 * declarations (err, len parameter) are elided.
 */
/* MGMT_OP_DISCONNECT handler: terminates an existing BR/EDR or LE connection
 * with reason "remote user terminated".
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS, /* … */

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp, /* … */

	/* Only one disconnect may be pending at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp)); /* … */

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, /* … */
	else /* assumed — else line elided */
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); /* … */

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp, /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); /* … */

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces and some case labels elided. */
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address type.
 * Non-LE links fall back to BDADDR_BREDR.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
	switch (link_type) { /* … LE_LINK case label elided … */
	switch (addr_type) {
	case ADDR_LE_DEV_PUBLIC:
		return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;

		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
/* NOTE(review): incomplete excerpt — braces, goto/label lines, "continue"
 * statements and declarations (err, i, rp_len, c) are elided.
 */
/* MGMT_OP_GET_CONNECTIONS handler: returns the list of mgmt-visible (ACL/LE)
 * connections, filtering out SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_rp_get_connections *rp;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED); /* … */

	/* First pass: count connections to size the reply buffer. */
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) /* … */

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL); /* … NULL check elided … */

	/* Second pass: fill in addresses of mgmt-connected links. */
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) /* … */
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK) /* … */

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, /* … */

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, err declaration and the
 * mgmt_pending_add size argument are elided.
 */
/* Queue a pending PIN-code negative reply and send the corresponding HCI
 * command carrying just the remote address.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, /* … */

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);
/* NOTE(review): incomplete excerpt — braces, goto/label lines, err/len and
 * some guards are elided.
 */
/* MGMT_OP_PIN_CODE_REPLY handler: forwards a user-supplied PIN code to the
 * controller; for high-security pairings a PIN shorter than 16 bytes is
 * converted into a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED); /* … */

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	/* presumably: if (!conn) — guard line elided */
	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
			      MGMT_STATUS_NOT_CONNECTED); /* … */

	/* High security requires a full 16-byte PIN. */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		/* presumably: if err >= 0, report INVALID_PARAMS — guard elided */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_INVALID_PARAMS); /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); /* … */

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, len parameter and trailing
 * reply arguments are elided.
 */
/* MGMT_OP_SET_IO_CAPABILITY handler: validates and stores the local IO
 * capability used for pairing.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_set_io_capability *cp = data;

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined IO capability. */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, /* … */
/* NOTE(review): incomplete excerpt — braces, continue and return statements
 * are elided.
 */
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data is the given
 * connection, if any.
 */
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE) /* … */
		if (cmd->user_data != conn) /* … */
/* NOTE(review): incomplete excerpt — braces and the trailing hci_conn_put/
 * return lines are elided.
 */
/* Complete a MGMT_OP_PAIR_DEVICE command: send the reply, detach the pairing
 * callbacks from the connection and release the reference taken at start.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* NOTE(review): incomplete excerpt — braces and the "if (cmd)" guard are
 * elided.
 */
/* Called when SMP pairing finishes; resolves any pending MGMT pair command
 * for the connection with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	/* presumably: if (cmd) — guard line elided */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
/* NOTE(review): incomplete excerpt — braces and the "if (!cmd)" guard line
 * are elided.
 */
/* BR/EDR connection/security callback: resolve the pending pair command with
 * the (mgmt-translated) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	/* presumably: if (!cmd) — guard line elided */
	BT_DBG("Unable to find a pending command");

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* NOTE(review): incomplete excerpt — braces, an early-return condition
 * (lines 3358-3361 of the original) and the "if (!cmd)" guard are elided.
 */
/* LE connection/security callback: resolve the pending pair command with the
 * (mgmt-translated) HCI status.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	/* presumably: if (!cmd) — guard line elided */
	BT_DBG("Unable to find a pending command");

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* NOTE(review): incomplete excerpt — braces, goto/label lines, else
 * keywords and declarations (err, addr_type, status, len) are elided.
 */
/* MGMT_OP_PAIR_DEVICE handler: initiates BR/EDR or LE pairing with the given
 * device, installing completion callbacks on the new connection.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS, /* … */

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS, /* … */

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp, /* … */

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp, /* … */

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, /* … */

	/* LE branch:
	 * Convert from L2CAP channel address type to HCI address type
	 */
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else /* assumed — else line elided */
		addr_type = ADDR_LE_DEV_RANDOM;

	/* When pairing a new device, it is expected to remember
	 * this device for future connections. Adding the connection
	 * parameter information ahead of time allows tracking
	 * of the slave preferred values and will speed up any
	 * further connection establishment.
	 *
	 * If connection parameters already exist, then they
	 * will be kept and this function does nothing.
	 */
	hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

	conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
			      sec_level, HCI_LE_CONN_TIMEOUT, /* … */

	/* Map connect errors to mgmt status codes (IS_ERR guard elided). */
	if (PTR_ERR(conn) == -EBUSY)
		status = MGMT_STATUS_BUSY;
	else if (PTR_ERR(conn) == -EOPNOTSUPP)
		status = MGMT_STATUS_NOT_SUPPORTED;
	else if (PTR_ERR(conn) == -ECONNREFUSED)
		status = MGMT_STATUS_REJECTED;
	else /* assumed — else line elided */
		status = MGMT_STATUS_CONNECT_FAILED;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp)); /* … */

	/* A connect_cfm callback already installed means another pairing
	 * is in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp)); /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	/* presumably: if (!cmd) — guard line elided */
	hci_conn_drop(conn); /* … */

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	/* } else { — elided */
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately. */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd); /* … */

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines and err/len
 * declarations are elided.
 */
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: cancels the pending pair command if it
 * targets the supplied address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED); /* … */

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	/* presumably: if (!cmd) — guard line elided */
	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
			      MGMT_STATUS_INVALID_PARAMS); /* … */

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing targets. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS); /* … */

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines, else keywords
 * and err declaration are elided.
 */
/* Common handler for user confirm/passkey (negative) replies: validates the
 * connection, routes LE replies through SMP, and BR/EDR replies through the
 * given HCI command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr, /* … */

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else /* assumed — else line elided */
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	/* presumably: if (!conn) — guard line elided */
	err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
				MGMT_STATUS_NOT_CONNECTED, addr, /* … */

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* LE pairing replies are handled by SMP, not raw HCI. */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		/* presumably: if (!err) — guard line elided */
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_SUCCESS, addr, /* … */
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_FAILED, addr, /* … */

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr)); /* … */

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	/* } else { — elided */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr), /* … */

	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces elided. */
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over user_pairing_resp. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* NOTE(review): incomplete excerpt — braces and len parameter elided. */
/* MGMT_OP_USER_CONFIRM_REPLY handler: length-checks the command then
 * delegates to user_pairing_resp.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_user_confirm_reply *cp = data;

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
/* NOTE(review): incomplete excerpt — braces elided. */
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* NOTE(review): incomplete excerpt — braces and len parameter elided. */
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered passkey via
 * user_pairing_resp.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_user_passkey_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* NOTE(review): incomplete excerpt — braces elided. */
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* NOTE(review): incomplete excerpt — braces elided. */
/* Queue an HCI Write Local Name command carrying hdev->dev_name. */
static void update_name(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* NOTE(review): incomplete excerpt — braces, hci_dev_lock, "if (!cmd)"
 * guard, cp assignment and status test are elided.
 */
/* hci_request completion callback for MGMT_OP_SET_LOCAL_NAME: reports the
 * result back to the pending mgmt command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	/* presumably: if (status) — guard line elided */
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			mgmt_status(status)); /* … */
	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, /* … */

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines, err/len and
 * the update_eir call (mentioned in the original) are elided.
 */
/* MGMT_OP_SET_LOCAL_NAME handler: stores the new local/short name and,
 * when powered, pushes the name into EIR and scan response data.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, /* … */
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, /* … */

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, /* … */

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); /* … */

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) { /* … update_name/update_eir elided … */

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, hci_dev_lock, "if (!cmd)"
 * guard and goto/label lines are elided.
 */
/* Completion callback for the local OOB data read: translates either the
 * legacy (P-192 only) or extended (P-192 + P-256) HCI reply into the mgmt
 * reply format.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED); /* … */

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only P-192 hash/randomizer present. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED); /* … */

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the reply so the absent P-256 fields are not sent. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	/* } else { — elided */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED); /* … */

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

	mgmt_pending_remove(cmd);
/* NOTE(review): incomplete excerpt — braces, goto/label lines, err
 * declaration and the BUSY status argument are elided.
 */
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: issues the (extended, when Secure
 * Connections is enabled) local OOB data read to the controller.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED); /* … */

	/* OOB data requires Secure Simple Pairing support. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED); /* … */

	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, /* … */

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); /* … */

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else /* assumed — else line elided */
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	/* presumably: on error, drop the pending command — guard elided */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines, else
 * keywords and declarations (err, status) are elided.
 */
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores remote OOB pairing data for a
 * device; supports the legacy (P-192 only) and extended (P-192 + P-256)
 * command sizes.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
	struct mgmt_addr_info *addr = data;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy command: P-192 hash/randomizer, BR/EDR only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr)); /* … */

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		/* presumably: if (err < 0) — guard line elided */
		status = MGMT_STATUS_FAILED; /* … */
		status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr)); /* … */

		/* In case one of the P-192 values is set to zero,
		 * then just disable OOB data for P-192.
		 */
		if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
		    !memcmp(cp->hash192, ZERO_KEY, 16)) { /* … NULL assignments elided … */
		rand192 = cp->rand192;
		hash192 = cp->hash192;

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) { /* … NULL assignments elided … */
		rand256 = cp->rand256;
		hash256 = cp->hash256;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192, /* … */
		/* presumably: if (err < 0) — guard line elided */
		status = MGMT_STATUS_FAILED; /* … */
		status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	/* } else { — elided */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces, goto/label lines and err/status
 * declarations are elided.
 */
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: removes stored remote OOB data for
 * one BR/EDR address, or all of it when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_remove_remote_oob_data *cp = data;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* BDADDR_ANY is the wildcard meaning "clear all OOB data". */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS; /* … */

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	/* presumably: if (err < 0) — guard line elided */
	status = MGMT_STATUS_INVALID_PARAMS; /* … */
	status = MGMT_STATUS_SUCCESS;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
/* NOTE(review): incomplete excerpt — braces and return statements elided. */
/* Queue a BR/EDR inquiry (GIAC) onto the given HCI request; on failure set
 * *status to the mgmt error and report it via the return value.
 */
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev); /* … */

	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY; /* … */

	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* NOTE(review): incomplete excerpt — braces, return statements, err and
 * own_addr_type declarations are elided.
 */
/* Queue the HCI commands needed to start an active LE scan with the given
 * interval; on failure set *status and report it via the return value.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;

	*status = mgmt_le_support(hdev); /* … */

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED; /* … */

		disable_advertising(req); /* … */

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address. (… continuation elided …)
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	/* presumably: if (err < 0) — guard line elided */
	*status = MGMT_STATUS_FAILED; /* … */

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), /* … */

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), /* … */
/* NOTE(review): incomplete excerpt — braces, break/return statements and
 * some case labels are elided.
 */
/* Queue the HCI commands for the configured discovery type (BR/EDR inquiry,
 * LE scan, or interleaved/simultaneous), reporting failure through *status.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status)) /* … */

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, /* … */
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2, /* … */

			if (!trigger_bredr_inquiry(req, status)) /* … */

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED; /* … */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status)) /* … */

		/* default: — elided */
		*status = MGMT_STATUS_INVALID_PARAMS;
/* NOTE(review): incomplete excerpt — braces, hci_dev_lock, goto/label lines,
 * break statements and the opcode parameter are elided.
 */
/* hci_request completion callback for starting (service) discovery: resolves
 * the pending command, sets the discovery state and schedules the LE scan
 * disable timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status, /* … */
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	/* presumably: if (!cmd) — guard line elided */
	cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	/* presumably: if (cmd) — guard line elided */
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	/* presumably: if (status) — guard line elided */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED); /* … */

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); /* … */
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time sine BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts. (… continuation elided …)
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else /* assumed — else line elided */
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); /* … */
	case DISCOV_TYPE_BREDR: /* … */

		/* default: — elided */
		BT_ERR("Invalid discovery type %d", hdev->discovery.type); /* … */

	/* presumably: if (timeout) — guard line elided */
	/* When service discovery is used and the controller has
	 * a strict duplicate filter, it is important to remember
	 * the start and duration of the scan. This is required
	 * for restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, /* … */
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout; /* … */

	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout); /* … */

	hci_dev_unlock(hdev);
4254 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4255 void *data
, u16 len
)
4257 struct mgmt_cp_start_discovery
*cp
= data
;
4258 struct mgmt_pending_cmd
*cmd
;
4259 struct hci_request req
;
4263 BT_DBG("%s", hdev
->name
);
4267 if (!hdev_is_powered(hdev
)) {
4268 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4269 MGMT_STATUS_NOT_POWERED
,
4270 &cp
->type
, sizeof(cp
->type
));
4274 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4275 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4276 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4277 MGMT_STATUS_BUSY
, &cp
->type
,
4282 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, data
, len
);
4288 cmd
->cmd_complete
= generic_cmd_complete
;
4290 /* Clear the discovery filter first to free any previously
4291 * allocated memory for the UUID list.
4293 hci_discovery_filter_clear(hdev
);
4295 hdev
->discovery
.type
= cp
->type
;
4296 hdev
->discovery
.report_invalid_rssi
= false;
4298 hci_req_init(&req
, hdev
);
4300 if (!trigger_discovery(&req
, &status
)) {
4301 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4302 status
, &cp
->type
, sizeof(cp
->type
));
4303 mgmt_pending_remove(cmd
);
4307 err
= hci_req_run(&req
, start_discovery_complete
);
4309 mgmt_pending_remove(cmd
);
4313 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4316 hci_dev_unlock(hdev
);
4320 static int service_discovery_cmd_complete(struct mgmt_pending_cmd
*cmd
,
4323 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
4327 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4328 void *data
, u16 len
)
4330 struct mgmt_cp_start_service_discovery
*cp
= data
;
4331 struct mgmt_pending_cmd
*cmd
;
4332 struct hci_request req
;
4333 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
4334 u16 uuid_count
, expected_len
;
4338 BT_DBG("%s", hdev
->name
);
4342 if (!hdev_is_powered(hdev
)) {
4343 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4344 MGMT_OP_START_SERVICE_DISCOVERY
,
4345 MGMT_STATUS_NOT_POWERED
,
4346 &cp
->type
, sizeof(cp
->type
));
4350 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4351 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4352 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4353 MGMT_OP_START_SERVICE_DISCOVERY
,
4354 MGMT_STATUS_BUSY
, &cp
->type
,
4359 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
4360 if (uuid_count
> max_uuid_count
) {
4361 BT_ERR("service_discovery: too big uuid_count value %u",
4363 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4364 MGMT_OP_START_SERVICE_DISCOVERY
,
4365 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4370 expected_len
= sizeof(*cp
) + uuid_count
* 16;
4371 if (expected_len
!= len
) {
4372 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4374 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4375 MGMT_OP_START_SERVICE_DISCOVERY
,
4376 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4381 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4388 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4390 /* Clear the discovery filter first to free any previously
4391 * allocated memory for the UUID list.
4393 hci_discovery_filter_clear(hdev
);
4395 hdev
->discovery
.result_filtering
= true;
4396 hdev
->discovery
.type
= cp
->type
;
4397 hdev
->discovery
.rssi
= cp
->rssi
;
4398 hdev
->discovery
.uuid_count
= uuid_count
;
4400 if (uuid_count
> 0) {
4401 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4403 if (!hdev
->discovery
.uuids
) {
4404 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4405 MGMT_OP_START_SERVICE_DISCOVERY
,
4407 &cp
->type
, sizeof(cp
->type
));
4408 mgmt_pending_remove(cmd
);
4413 hci_req_init(&req
, hdev
);
4415 if (!trigger_discovery(&req
, &status
)) {
4416 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4417 MGMT_OP_START_SERVICE_DISCOVERY
,
4418 status
, &cp
->type
, sizeof(cp
->type
));
4419 mgmt_pending_remove(cmd
);
4423 err
= hci_req_run(&req
, start_discovery_complete
);
4425 mgmt_pending_remove(cmd
);
4429 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4432 hci_dev_unlock(hdev
);
4436 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
4438 struct mgmt_pending_cmd
*cmd
;
4440 BT_DBG("status %d", status
);
4444 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
4446 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4447 mgmt_pending_remove(cmd
);
4451 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4453 hci_dev_unlock(hdev
);
4456 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4459 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4460 struct mgmt_pending_cmd
*cmd
;
4461 struct hci_request req
;
4464 BT_DBG("%s", hdev
->name
);
4468 if (!hci_discovery_active(hdev
)) {
4469 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4470 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4471 sizeof(mgmt_cp
->type
));
4475 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4476 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4477 MGMT_STATUS_INVALID_PARAMS
,
4478 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4482 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4488 cmd
->cmd_complete
= generic_cmd_complete
;
4490 hci_req_init(&req
, hdev
);
4492 hci_stop_discovery(&req
);
4494 err
= hci_req_run(&req
, stop_discovery_complete
);
4496 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4500 mgmt_pending_remove(cmd
);
4502 /* If no HCI commands were sent we're done */
4503 if (err
== -ENODATA
) {
4504 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
4505 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4506 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4510 hci_dev_unlock(hdev
);
4514 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4517 struct mgmt_cp_confirm_name
*cp
= data
;
4518 struct inquiry_entry
*e
;
4521 BT_DBG("%s", hdev
->name
);
4525 if (!hci_discovery_active(hdev
)) {
4526 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4527 MGMT_STATUS_FAILED
, &cp
->addr
,
4532 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4534 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4535 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4540 if (cp
->name_known
) {
4541 e
->name_state
= NAME_KNOWN
;
4544 e
->name_state
= NAME_NEEDED
;
4545 hci_inquiry_cache_update_resolve(hdev
, e
);
4548 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
4549 &cp
->addr
, sizeof(cp
->addr
));
4552 hci_dev_unlock(hdev
);
4556 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4559 struct mgmt_cp_block_device
*cp
= data
;
4563 BT_DBG("%s", hdev
->name
);
4565 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4566 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
4567 MGMT_STATUS_INVALID_PARAMS
,
4568 &cp
->addr
, sizeof(cp
->addr
));
4572 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4575 status
= MGMT_STATUS_FAILED
;
4579 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4581 status
= MGMT_STATUS_SUCCESS
;
4584 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
4585 &cp
->addr
, sizeof(cp
->addr
));
4587 hci_dev_unlock(hdev
);
4592 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4595 struct mgmt_cp_unblock_device
*cp
= data
;
4599 BT_DBG("%s", hdev
->name
);
4601 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4602 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
4603 MGMT_STATUS_INVALID_PARAMS
,
4604 &cp
->addr
, sizeof(cp
->addr
));
4608 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4611 status
= MGMT_STATUS_INVALID_PARAMS
;
4615 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4617 status
= MGMT_STATUS_SUCCESS
;
4620 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
4621 &cp
->addr
, sizeof(cp
->addr
));
4623 hci_dev_unlock(hdev
);
4628 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4631 struct mgmt_cp_set_device_id
*cp
= data
;
4632 struct hci_request req
;
4636 BT_DBG("%s", hdev
->name
);
4638 source
= __le16_to_cpu(cp
->source
);
4640 if (source
> 0x0002)
4641 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
4642 MGMT_STATUS_INVALID_PARAMS
);
4646 hdev
->devid_source
= source
;
4647 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
4648 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
4649 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
4651 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
4654 hci_req_init(&req
, hdev
);
4656 hci_req_run(&req
, NULL
);
4658 hci_dev_unlock(hdev
);
4663 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
4666 BT_DBG("status %d", status
);
4669 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
4672 struct cmd_lookup match
= { NULL
, hdev
};
4673 struct hci_request req
;
4678 u8 mgmt_err
= mgmt_status(status
);
4680 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4681 cmd_status_rsp
, &mgmt_err
);
4685 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
4686 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
4688 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
4690 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4693 new_settings(hdev
, match
.sk
);
4698 /* If "Set Advertising" was just disabled and instance advertising was
4699 * set up earlier, then enable the advertising instance.
4701 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
4702 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
4705 hci_req_init(&req
, hdev
);
4707 update_adv_data(&req
);
4708 enable_advertising(&req
);
4710 if (hci_req_run(&req
, enable_advertising_instance
) < 0)
4711 BT_ERR("Failed to re-configure advertising");
4714 hci_dev_unlock(hdev
);
4717 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4720 struct mgmt_mode
*cp
= data
;
4721 struct mgmt_pending_cmd
*cmd
;
4722 struct hci_request req
;
4726 BT_DBG("request for %s", hdev
->name
);
4728 status
= mgmt_le_support(hdev
);
4730 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4733 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4734 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4735 MGMT_STATUS_INVALID_PARAMS
);
4741 /* The following conditions are ones which mean that we should
4742 * not do any HCI communication but directly send a mgmt
4743 * response to user space (after toggling the flag if
4746 if (!hdev_is_powered(hdev
) ||
4747 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
4748 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
4749 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4750 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4751 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4755 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
4756 if (cp
->val
== 0x02)
4757 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4759 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4761 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
4762 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4765 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4770 err
= new_settings(hdev
, sk
);
4775 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4776 pending_find(MGMT_OP_SET_LE
, hdev
)) {
4777 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4782 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4788 hci_req_init(&req
, hdev
);
4790 if (cp
->val
== 0x02)
4791 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4793 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4796 /* Switch to instance "0" for the Set Advertising setting. */
4797 update_adv_data_for_instance(&req
, 0);
4798 update_scan_rsp_data_for_instance(&req
, 0);
4799 enable_advertising(&req
);
4801 disable_advertising(&req
);
4804 err
= hci_req_run(&req
, set_advertising_complete
);
4806 mgmt_pending_remove(cmd
);
4809 hci_dev_unlock(hdev
);
4813 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
4814 void *data
, u16 len
)
4816 struct mgmt_cp_set_static_address
*cp
= data
;
4819 BT_DBG("%s", hdev
->name
);
4821 if (!lmp_le_capable(hdev
))
4822 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4823 MGMT_STATUS_NOT_SUPPORTED
);
4825 if (hdev_is_powered(hdev
))
4826 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4827 MGMT_STATUS_REJECTED
);
4829 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
4830 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
4831 return mgmt_cmd_status(sk
, hdev
->id
,
4832 MGMT_OP_SET_STATIC_ADDRESS
,
4833 MGMT_STATUS_INVALID_PARAMS
);
4835 /* Two most significant bits shall be set */
4836 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4837 return mgmt_cmd_status(sk
, hdev
->id
,
4838 MGMT_OP_SET_STATIC_ADDRESS
,
4839 MGMT_STATUS_INVALID_PARAMS
);
4844 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
4846 err
= send_settings_rsp(sk
, MGMT_OP_SET_STATIC_ADDRESS
, hdev
);
4850 err
= new_settings(hdev
, sk
);
4853 hci_dev_unlock(hdev
);
4857 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4858 void *data
, u16 len
)
4860 struct mgmt_cp_set_scan_params
*cp
= data
;
4861 __u16 interval
, window
;
4864 BT_DBG("%s", hdev
->name
);
4866 if (!lmp_le_capable(hdev
))
4867 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4868 MGMT_STATUS_NOT_SUPPORTED
);
4870 interval
= __le16_to_cpu(cp
->interval
);
4872 if (interval
< 0x0004 || interval
> 0x4000)
4873 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4874 MGMT_STATUS_INVALID_PARAMS
);
4876 window
= __le16_to_cpu(cp
->window
);
4878 if (window
< 0x0004 || window
> 0x4000)
4879 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4880 MGMT_STATUS_INVALID_PARAMS
);
4882 if (window
> interval
)
4883 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4884 MGMT_STATUS_INVALID_PARAMS
);
4888 hdev
->le_scan_interval
= interval
;
4889 hdev
->le_scan_window
= window
;
4891 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0,
4894 /* If background scan is running, restart it so new parameters are
4897 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4898 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4899 struct hci_request req
;
4901 hci_req_init(&req
, hdev
);
4903 hci_req_add_le_scan_disable(&req
);
4904 hci_req_add_le_passive_scan(&req
);
4906 hci_req_run(&req
, NULL
);
4909 hci_dev_unlock(hdev
);
4914 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
,
4917 struct mgmt_pending_cmd
*cmd
;
4919 BT_DBG("status 0x%02x", status
);
4923 cmd
= pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4928 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4929 mgmt_status(status
));
4931 struct mgmt_mode
*cp
= cmd
->param
;
4934 hci_dev_set_flag(hdev
, HCI_FAST_CONNECTABLE
);
4936 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
4938 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4939 new_settings(hdev
, cmd
->sk
);
4942 mgmt_pending_remove(cmd
);
4945 hci_dev_unlock(hdev
);
4948 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4949 void *data
, u16 len
)
4951 struct mgmt_mode
*cp
= data
;
4952 struct mgmt_pending_cmd
*cmd
;
4953 struct hci_request req
;
4956 BT_DBG("%s", hdev
->name
);
4958 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
4959 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4960 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4961 MGMT_STATUS_NOT_SUPPORTED
);
4963 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4964 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4965 MGMT_STATUS_INVALID_PARAMS
);
4969 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4970 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4975 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
4976 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4981 if (!hdev_is_powered(hdev
)) {
4982 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
4983 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4985 new_settings(hdev
, sk
);
4989 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4996 hci_req_init(&req
, hdev
);
4998 write_fast_connectable(&req
, cp
->val
);
5000 err
= hci_req_run(&req
, fast_connectable_complete
);
5002 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5003 MGMT_STATUS_FAILED
);
5004 mgmt_pending_remove(cmd
);
5008 hci_dev_unlock(hdev
);
5013 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5015 struct mgmt_pending_cmd
*cmd
;
5017 BT_DBG("status 0x%02x", status
);
5021 cmd
= pending_find(MGMT_OP_SET_BREDR
, hdev
);
5026 u8 mgmt_err
= mgmt_status(status
);
5028 /* We need to restore the flag if related HCI commands
5031 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
5033 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
5035 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
5036 new_settings(hdev
, cmd
->sk
);
5039 mgmt_pending_remove(cmd
);
5042 hci_dev_unlock(hdev
);
5045 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
5047 struct mgmt_mode
*cp
= data
;
5048 struct mgmt_pending_cmd
*cmd
;
5049 struct hci_request req
;
5052 BT_DBG("request for %s", hdev
->name
);
5054 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
5055 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5056 MGMT_STATUS_NOT_SUPPORTED
);
5058 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5059 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5060 MGMT_STATUS_REJECTED
);
5062 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5063 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5064 MGMT_STATUS_INVALID_PARAMS
);
5068 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5069 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5073 if (!hdev_is_powered(hdev
)) {
5075 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
5076 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
5077 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
5078 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
5079 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
5082 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
5084 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5088 err
= new_settings(hdev
, sk
);
5092 /* Reject disabling when powered on */
5094 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5095 MGMT_STATUS_REJECTED
);
5098 /* When configuring a dual-mode controller to operate
5099 * with LE only and using a static address, then switching
5100 * BR/EDR back on is not allowed.
5102 * Dual-mode controllers shall operate with the public
5103 * address as its identity address for BR/EDR and LE. So
5104 * reject the attempt to create an invalid configuration.
5106 * The same restrictions applies when secure connections
5107 * has been enabled. For BR/EDR this is a controller feature
5108 * while for LE it is a host stack feature. This means that
5109 * switching BR/EDR back on when secure connections has been
5110 * enabled is not a supported transaction.
5112 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5113 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
5114 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
5115 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5116 MGMT_STATUS_REJECTED
);
5121 if (pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
5122 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5127 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
5133 /* We need to flip the bit already here so that update_adv_data
5134 * generates the correct flags.
5136 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
5138 hci_req_init(&req
, hdev
);
5140 write_fast_connectable(&req
, false);
5141 __hci_update_page_scan(&req
);
5143 /* Since only the advertising data flags will change, there
5144 * is no need to update the scan response data.
5146 update_adv_data(&req
);
5148 err
= hci_req_run(&req
, set_bredr_complete
);
5150 mgmt_pending_remove(cmd
);
5153 hci_dev_unlock(hdev
);
5157 static void sc_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5159 struct mgmt_pending_cmd
*cmd
;
5160 struct mgmt_mode
*cp
;
5162 BT_DBG("%s status %u", hdev
->name
, status
);
5166 cmd
= pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
);
5171 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5172 mgmt_status(status
));
5180 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
5181 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5184 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5185 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5188 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5189 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5193 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5194 new_settings(hdev
, cmd
->sk
);
5197 mgmt_pending_remove(cmd
);
5199 hci_dev_unlock(hdev
);
5202 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
5203 void *data
, u16 len
)
5205 struct mgmt_mode
*cp
= data
;
5206 struct mgmt_pending_cmd
*cmd
;
5207 struct hci_request req
;
5211 BT_DBG("request for %s", hdev
->name
);
5213 if (!lmp_sc_capable(hdev
) &&
5214 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5215 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5216 MGMT_STATUS_NOT_SUPPORTED
);
5218 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5219 lmp_sc_capable(hdev
) &&
5220 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
5221 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5222 MGMT_STATUS_REJECTED
);
5224 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5225 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5226 MGMT_STATUS_INVALID_PARAMS
);
5230 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
5231 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5235 changed
= !hci_dev_test_and_set_flag(hdev
,
5237 if (cp
->val
== 0x02)
5238 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5240 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5242 changed
= hci_dev_test_and_clear_flag(hdev
,
5244 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5247 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5252 err
= new_settings(hdev
, sk
);
5257 if (pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
5258 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5265 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5266 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5267 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5271 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
5277 hci_req_init(&req
, hdev
);
5278 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
5279 err
= hci_req_run(&req
, sc_enable_complete
);
5281 mgmt_pending_remove(cmd
);
5286 hci_dev_unlock(hdev
);
5290 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5291 void *data
, u16 len
)
5293 struct mgmt_mode
*cp
= data
;
5294 bool changed
, use_changed
;
5297 BT_DBG("request for %s", hdev
->name
);
5299 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5300 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
5301 MGMT_STATUS_INVALID_PARAMS
);
5306 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
5308 changed
= hci_dev_test_and_clear_flag(hdev
,
5309 HCI_KEEP_DEBUG_KEYS
);
5311 if (cp
->val
== 0x02)
5312 use_changed
= !hci_dev_test_and_set_flag(hdev
,
5313 HCI_USE_DEBUG_KEYS
);
5315 use_changed
= hci_dev_test_and_clear_flag(hdev
,
5316 HCI_USE_DEBUG_KEYS
);
5318 if (hdev_is_powered(hdev
) && use_changed
&&
5319 hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
5320 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
5321 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
5322 sizeof(mode
), &mode
);
5325 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
5330 err
= new_settings(hdev
, sk
);
5333 hci_dev_unlock(hdev
);
5337 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
5340 struct mgmt_cp_set_privacy
*cp
= cp_data
;
5344 BT_DBG("request for %s", hdev
->name
);
5346 if (!lmp_le_capable(hdev
))
5347 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5348 MGMT_STATUS_NOT_SUPPORTED
);
5350 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
5351 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5352 MGMT_STATUS_INVALID_PARAMS
);
5354 if (hdev_is_powered(hdev
))
5355 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5356 MGMT_STATUS_REJECTED
);
5360 /* If user space supports this command it is also expected to
5361 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5363 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
5366 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_PRIVACY
);
5367 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
5368 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
5370 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_PRIVACY
);
5371 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
5372 hci_dev_clear_flag(hdev
, HCI_RPA_EXPIRED
);
5375 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
5380 err
= new_settings(hdev
, sk
);
5383 hci_dev_unlock(hdev
);
5387 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
5389 switch (irk
->addr
.type
) {
5390 case BDADDR_LE_PUBLIC
:
5393 case BDADDR_LE_RANDOM
:
5394 /* Two most significant bits shall be set */
5395 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5403 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
5406 struct mgmt_cp_load_irks
*cp
= cp_data
;
5407 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
5408 sizeof(struct mgmt_irk_info
));
5409 u16 irk_count
, expected_len
;
5412 BT_DBG("request for %s", hdev
->name
);
5414 if (!lmp_le_capable(hdev
))
5415 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5416 MGMT_STATUS_NOT_SUPPORTED
);
5418 irk_count
= __le16_to_cpu(cp
->irk_count
);
5419 if (irk_count
> max_irk_count
) {
5420 BT_ERR("load_irks: too big irk_count value %u", irk_count
);
5421 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5422 MGMT_STATUS_INVALID_PARAMS
);
5425 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
5426 if (expected_len
!= len
) {
5427 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5429 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5430 MGMT_STATUS_INVALID_PARAMS
);
5433 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
5435 for (i
= 0; i
< irk_count
; i
++) {
5436 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
5438 if (!irk_is_valid(key
))
5439 return mgmt_cmd_status(sk
, hdev
->id
,
5441 MGMT_STATUS_INVALID_PARAMS
);
5446 hci_smp_irks_clear(hdev
);
5448 for (i
= 0; i
< irk_count
; i
++) {
5449 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
5452 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
5453 addr_type
= ADDR_LE_DEV_PUBLIC
;
5455 addr_type
= ADDR_LE_DEV_RANDOM
;
5457 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
5461 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
5463 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
5465 hci_dev_unlock(hdev
);
5470 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
5472 if (key
->master
!= 0x00 && key
->master
!= 0x01)
5475 switch (key
->addr
.type
) {
5476 case BDADDR_LE_PUBLIC
:
5479 case BDADDR_LE_RANDOM
:
5480 /* Two most significant bits shall be set */
5481 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5489 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5490 void *cp_data
, u16 len
)
5492 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
5493 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
5494 sizeof(struct mgmt_ltk_info
));
5495 u16 key_count
, expected_len
;
5498 BT_DBG("request for %s", hdev
->name
);
5500 if (!lmp_le_capable(hdev
))
5501 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5502 MGMT_STATUS_NOT_SUPPORTED
);
5504 key_count
= __le16_to_cpu(cp
->key_count
);
5505 if (key_count
> max_key_count
) {
5506 BT_ERR("load_ltks: too big key_count value %u", key_count
);
5507 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5508 MGMT_STATUS_INVALID_PARAMS
);
5511 expected_len
= sizeof(*cp
) + key_count
*
5512 sizeof(struct mgmt_ltk_info
);
5513 if (expected_len
!= len
) {
5514 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5516 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5517 MGMT_STATUS_INVALID_PARAMS
);
5520 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
5522 for (i
= 0; i
< key_count
; i
++) {
5523 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5525 if (!ltk_is_valid(key
))
5526 return mgmt_cmd_status(sk
, hdev
->id
,
5527 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5528 MGMT_STATUS_INVALID_PARAMS
);
5533 hci_smp_ltks_clear(hdev
);
5535 for (i
= 0; i
< key_count
; i
++) {
5536 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5537 u8 type
, addr_type
, authenticated
;
5539 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
5540 addr_type
= ADDR_LE_DEV_PUBLIC
;
5542 addr_type
= ADDR_LE_DEV_RANDOM
;
5544 switch (key
->type
) {
5545 case MGMT_LTK_UNAUTHENTICATED
:
5546 authenticated
= 0x00;
5547 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5549 case MGMT_LTK_AUTHENTICATED
:
5550 authenticated
= 0x01;
5551 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5553 case MGMT_LTK_P256_UNAUTH
:
5554 authenticated
= 0x00;
5555 type
= SMP_LTK_P256
;
5557 case MGMT_LTK_P256_AUTH
:
5558 authenticated
= 0x01;
5559 type
= SMP_LTK_P256
;
5561 case MGMT_LTK_P256_DEBUG
:
5562 authenticated
= 0x00;
5563 type
= SMP_LTK_P256_DEBUG
;
5568 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
5569 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
5573 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
5576 hci_dev_unlock(hdev
);
5581 static int conn_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5583 struct hci_conn
*conn
= cmd
->user_data
;
5584 struct mgmt_rp_get_conn_info rp
;
5587 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
5589 if (status
== MGMT_STATUS_SUCCESS
) {
5590 rp
.rssi
= conn
->rssi
;
5591 rp
.tx_power
= conn
->tx_power
;
5592 rp
.max_tx_power
= conn
->max_tx_power
;
5594 rp
.rssi
= HCI_RSSI_INVALID
;
5595 rp
.tx_power
= HCI_TX_POWER_INVALID
;
5596 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
5599 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
5600 status
, &rp
, sizeof(rp
));
5602 hci_conn_drop(conn
);
5608 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
,
5611 struct hci_cp_read_rssi
*cp
;
5612 struct mgmt_pending_cmd
*cmd
;
5613 struct hci_conn
*conn
;
5617 BT_DBG("status 0x%02x", hci_status
);
5621 /* Commands sent in request are either Read RSSI or Read Transmit Power
5622 * Level so we check which one was last sent to retrieve connection
5623 * handle. Both commands have handle as first parameter so it's safe to
5624 * cast data on the same command struct.
5626 * First command sent is always Read RSSI and we fail only if it fails.
5627 * In other case we simply override error to indicate success as we
5628 * already remembered if TX power value is actually valid.
5630 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
5632 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
5633 status
= MGMT_STATUS_SUCCESS
;
5635 status
= mgmt_status(hci_status
);
5639 BT_ERR("invalid sent_cmd in conn_info response");
5643 handle
= __le16_to_cpu(cp
->handle
);
5644 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5646 BT_ERR("unknown handle (%d) in conn_info response", handle
);
5650 cmd
= pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
5654 cmd
->cmd_complete(cmd
, status
);
5655 mgmt_pending_remove(cmd
);
5658 hci_dev_unlock(hdev
);
5661 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5664 struct mgmt_cp_get_conn_info
*cp
= data
;
5665 struct mgmt_rp_get_conn_info rp
;
5666 struct hci_conn
*conn
;
5667 unsigned long conn_info_age
;
5670 BT_DBG("%s", hdev
->name
);
5672 memset(&rp
, 0, sizeof(rp
));
5673 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5674 rp
.addr
.type
= cp
->addr
.type
;
5676 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5677 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5678 MGMT_STATUS_INVALID_PARAMS
,
5683 if (!hdev_is_powered(hdev
)) {
5684 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5685 MGMT_STATUS_NOT_POWERED
, &rp
,
5690 if (cp
->addr
.type
== BDADDR_BREDR
)
5691 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5694 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
5696 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5697 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5698 MGMT_STATUS_NOT_CONNECTED
, &rp
,
5703 if (pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
5704 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5705 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
5709 /* To avoid client trying to guess when to poll again for information we
5710 * calculate conn info age as random value between min/max set in hdev.
5712 conn_info_age
= hdev
->conn_info_min_age
+
5713 prandom_u32_max(hdev
->conn_info_max_age
-
5714 hdev
->conn_info_min_age
);
5716 /* Query controller to refresh cached values if they are too old or were
5719 if (time_after(jiffies
, conn
->conn_info_timestamp
+
5720 msecs_to_jiffies(conn_info_age
)) ||
5721 !conn
->conn_info_timestamp
) {
5722 struct hci_request req
;
5723 struct hci_cp_read_tx_power req_txp_cp
;
5724 struct hci_cp_read_rssi req_rssi_cp
;
5725 struct mgmt_pending_cmd
*cmd
;
5727 hci_req_init(&req
, hdev
);
5728 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
5729 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
5732 /* For LE links TX power does not change thus we don't need to
5733 * query for it once value is known.
5735 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5736 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
5737 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5738 req_txp_cp
.type
= 0x00;
5739 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5740 sizeof(req_txp_cp
), &req_txp_cp
);
5743 /* Max TX power needs to be read only once per connection */
5744 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5745 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5746 req_txp_cp
.type
= 0x01;
5747 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5748 sizeof(req_txp_cp
), &req_txp_cp
);
5751 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5755 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5762 hci_conn_hold(conn
);
5763 cmd
->user_data
= hci_conn_get(conn
);
5764 cmd
->cmd_complete
= conn_info_cmd_complete
;
5766 conn
->conn_info_timestamp
= jiffies
;
5768 /* Cache is valid, just reply with values cached in hci_conn */
5769 rp
.rssi
= conn
->rssi
;
5770 rp
.tx_power
= conn
->tx_power
;
5771 rp
.max_tx_power
= conn
->max_tx_power
;
5773 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5774 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5778 hci_dev_unlock(hdev
);
5782 static int clock_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5784 struct hci_conn
*conn
= cmd
->user_data
;
5785 struct mgmt_rp_get_clock_info rp
;
5786 struct hci_dev
*hdev
;
5789 memset(&rp
, 0, sizeof(rp
));
5790 memcpy(&rp
.addr
, &cmd
->param
, sizeof(rp
.addr
));
5795 hdev
= hci_dev_get(cmd
->index
);
5797 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
5802 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
5803 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
5807 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
,
5811 hci_conn_drop(conn
);
5818 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5820 struct hci_cp_read_clock
*hci_cp
;
5821 struct mgmt_pending_cmd
*cmd
;
5822 struct hci_conn
*conn
;
5824 BT_DBG("%s status %u", hdev
->name
, status
);
5828 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5832 if (hci_cp
->which
) {
5833 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5834 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5839 cmd
= pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5843 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5844 mgmt_pending_remove(cmd
);
5847 hci_dev_unlock(hdev
);
5850 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5853 struct mgmt_cp_get_clock_info
*cp
= data
;
5854 struct mgmt_rp_get_clock_info rp
;
5855 struct hci_cp_read_clock hci_cp
;
5856 struct mgmt_pending_cmd
*cmd
;
5857 struct hci_request req
;
5858 struct hci_conn
*conn
;
5861 BT_DBG("%s", hdev
->name
);
5863 memset(&rp
, 0, sizeof(rp
));
5864 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5865 rp
.addr
.type
= cp
->addr
.type
;
5867 if (cp
->addr
.type
!= BDADDR_BREDR
)
5868 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5869 MGMT_STATUS_INVALID_PARAMS
,
5874 if (!hdev_is_powered(hdev
)) {
5875 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5876 MGMT_STATUS_NOT_POWERED
, &rp
,
5881 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5882 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5884 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5885 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5886 MGMT_OP_GET_CLOCK_INFO
,
5887 MGMT_STATUS_NOT_CONNECTED
,
5895 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
5901 cmd
->cmd_complete
= clock_info_cmd_complete
;
5903 hci_req_init(&req
, hdev
);
5905 memset(&hci_cp
, 0, sizeof(hci_cp
));
5906 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5909 hci_conn_hold(conn
);
5910 cmd
->user_data
= hci_conn_get(conn
);
5912 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
5913 hci_cp
.which
= 0x01; /* Piconet clock */
5914 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5917 err
= hci_req_run(&req
, get_clock_info_complete
);
5919 mgmt_pending_remove(cmd
);
5922 hci_dev_unlock(hdev
);
5926 static bool is_connected(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 type
)
5928 struct hci_conn
*conn
;
5930 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, addr
);
5934 if (conn
->dst_type
!= type
)
5937 if (conn
->state
!= BT_CONNECTED
)
5943 /* This function requires the caller holds hdev->lock */
5944 static int hci_conn_params_set(struct hci_request
*req
, bdaddr_t
*addr
,
5945 u8 addr_type
, u8 auto_connect
)
5947 struct hci_dev
*hdev
= req
->hdev
;
5948 struct hci_conn_params
*params
;
5950 params
= hci_conn_params_add(hdev
, addr
, addr_type
);
5954 if (params
->auto_connect
== auto_connect
)
5957 list_del_init(¶ms
->action
);
5959 switch (auto_connect
) {
5960 case HCI_AUTO_CONN_DISABLED
:
5961 case HCI_AUTO_CONN_LINK_LOSS
:
5962 __hci_update_background_scan(req
);
5964 case HCI_AUTO_CONN_REPORT
:
5965 list_add(¶ms
->action
, &hdev
->pend_le_reports
);
5966 __hci_update_background_scan(req
);
5968 case HCI_AUTO_CONN_DIRECT
:
5969 case HCI_AUTO_CONN_ALWAYS
:
5970 if (!is_connected(hdev
, addr
, addr_type
)) {
5971 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5972 __hci_update_background_scan(req
);
5977 params
->auto_connect
= auto_connect
;
5979 BT_DBG("addr %pMR (type %u) auto_connect %u", addr
, addr_type
,
5985 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
5986 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
5988 struct mgmt_ev_device_added ev
;
5990 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5991 ev
.addr
.type
= type
;
5994 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5997 static void add_device_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5999 struct mgmt_pending_cmd
*cmd
;
6001 BT_DBG("status 0x%02x", status
);
6005 cmd
= pending_find(MGMT_OP_ADD_DEVICE
, hdev
);
6009 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6010 mgmt_pending_remove(cmd
);
6013 hci_dev_unlock(hdev
);
6016 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
6017 void *data
, u16 len
)
6019 struct mgmt_cp_add_device
*cp
= data
;
6020 struct mgmt_pending_cmd
*cmd
;
6021 struct hci_request req
;
6022 u8 auto_conn
, addr_type
;
6025 BT_DBG("%s", hdev
->name
);
6027 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
6028 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
6029 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6030 MGMT_STATUS_INVALID_PARAMS
,
6031 &cp
->addr
, sizeof(cp
->addr
));
6033 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
6034 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6035 MGMT_STATUS_INVALID_PARAMS
,
6036 &cp
->addr
, sizeof(cp
->addr
));
6038 hci_req_init(&req
, hdev
);
6042 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_DEVICE
, hdev
, data
, len
);
6048 cmd
->cmd_complete
= addr_cmd_complete
;
6050 if (cp
->addr
.type
== BDADDR_BREDR
) {
6051 /* Only incoming connections action is supported for now */
6052 if (cp
->action
!= 0x01) {
6053 err
= cmd
->cmd_complete(cmd
,
6054 MGMT_STATUS_INVALID_PARAMS
);
6055 mgmt_pending_remove(cmd
);
6059 err
= hci_bdaddr_list_add(&hdev
->whitelist
, &cp
->addr
.bdaddr
,
6064 __hci_update_page_scan(&req
);
6069 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
6070 addr_type
= ADDR_LE_DEV_PUBLIC
;
6072 addr_type
= ADDR_LE_DEV_RANDOM
;
6074 if (cp
->action
== 0x02)
6075 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
6076 else if (cp
->action
== 0x01)
6077 auto_conn
= HCI_AUTO_CONN_DIRECT
;
6079 auto_conn
= HCI_AUTO_CONN_REPORT
;
6081 /* If the connection parameters don't exist for this device,
6082 * they will be created and configured with defaults.
6084 if (hci_conn_params_set(&req
, &cp
->addr
.bdaddr
, addr_type
,
6086 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_FAILED
);
6087 mgmt_pending_remove(cmd
);
6092 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
6094 err
= hci_req_run(&req
, add_device_complete
);
6096 /* ENODATA means no HCI commands were needed (e.g. if
6097 * the adapter is powered off).
6099 if (err
== -ENODATA
)
6100 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_SUCCESS
);
6101 mgmt_pending_remove(cmd
);
6105 hci_dev_unlock(hdev
);
6109 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
6110 bdaddr_t
*bdaddr
, u8 type
)
6112 struct mgmt_ev_device_removed ev
;
6114 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6115 ev
.addr
.type
= type
;
6117 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
6120 static void remove_device_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
6122 struct mgmt_pending_cmd
*cmd
;
6124 BT_DBG("status 0x%02x", status
);
6128 cmd
= pending_find(MGMT_OP_REMOVE_DEVICE
, hdev
);
6132 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6133 mgmt_pending_remove(cmd
);
6136 hci_dev_unlock(hdev
);
6139 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
6140 void *data
, u16 len
)
6142 struct mgmt_cp_remove_device
*cp
= data
;
6143 struct mgmt_pending_cmd
*cmd
;
6144 struct hci_request req
;
6147 BT_DBG("%s", hdev
->name
);
6149 hci_req_init(&req
, hdev
);
6153 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_DEVICE
, hdev
, data
, len
);
6159 cmd
->cmd_complete
= addr_cmd_complete
;
6161 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6162 struct hci_conn_params
*params
;
6165 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
6166 err
= cmd
->cmd_complete(cmd
,
6167 MGMT_STATUS_INVALID_PARAMS
);
6168 mgmt_pending_remove(cmd
);
6172 if (cp
->addr
.type
== BDADDR_BREDR
) {
6173 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
6177 err
= cmd
->cmd_complete(cmd
,
6178 MGMT_STATUS_INVALID_PARAMS
);
6179 mgmt_pending_remove(cmd
);
6183 __hci_update_page_scan(&req
);
6185 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
6190 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
6191 addr_type
= ADDR_LE_DEV_PUBLIC
;
6193 addr_type
= ADDR_LE_DEV_RANDOM
;
6195 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
6198 err
= cmd
->cmd_complete(cmd
,
6199 MGMT_STATUS_INVALID_PARAMS
);
6200 mgmt_pending_remove(cmd
);
6204 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
) {
6205 err
= cmd
->cmd_complete(cmd
,
6206 MGMT_STATUS_INVALID_PARAMS
);
6207 mgmt_pending_remove(cmd
);
6211 list_del(¶ms
->action
);
6212 list_del(¶ms
->list
);
6214 __hci_update_background_scan(&req
);
6216 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
6218 struct hci_conn_params
*p
, *tmp
;
6219 struct bdaddr_list
*b
, *btmp
;
6221 if (cp
->addr
.type
) {
6222 err
= cmd
->cmd_complete(cmd
,
6223 MGMT_STATUS_INVALID_PARAMS
);
6224 mgmt_pending_remove(cmd
);
6228 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
6229 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
6234 __hci_update_page_scan(&req
);
6236 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
6237 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
6239 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
6240 list_del(&p
->action
);
6245 BT_DBG("All LE connection parameters were removed");
6247 __hci_update_background_scan(&req
);
6251 err
= hci_req_run(&req
, remove_device_complete
);
6253 /* ENODATA means no HCI commands were needed (e.g. if
6254 * the adapter is powered off).
6256 if (err
== -ENODATA
)
6257 err
= cmd
->cmd_complete(cmd
, MGMT_STATUS_SUCCESS
);
6258 mgmt_pending_remove(cmd
);
6262 hci_dev_unlock(hdev
);
6266 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6269 struct mgmt_cp_load_conn_param
*cp
= data
;
6270 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
6271 sizeof(struct mgmt_conn_param
));
6272 u16 param_count
, expected_len
;
6275 if (!lmp_le_capable(hdev
))
6276 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6277 MGMT_STATUS_NOT_SUPPORTED
);
6279 param_count
= __le16_to_cpu(cp
->param_count
);
6280 if (param_count
> max_param_count
) {
6281 BT_ERR("load_conn_param: too big param_count value %u",
6283 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6284 MGMT_STATUS_INVALID_PARAMS
);
6287 expected_len
= sizeof(*cp
) + param_count
*
6288 sizeof(struct mgmt_conn_param
);
6289 if (expected_len
!= len
) {
6290 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6292 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6293 MGMT_STATUS_INVALID_PARAMS
);
6296 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
6300 hci_conn_params_clear_disabled(hdev
);
6302 for (i
= 0; i
< param_count
; i
++) {
6303 struct mgmt_conn_param
*param
= &cp
->params
[i
];
6304 struct hci_conn_params
*hci_param
;
6305 u16 min
, max
, latency
, timeout
;
6308 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
6311 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
6312 addr_type
= ADDR_LE_DEV_PUBLIC
;
6313 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
6314 addr_type
= ADDR_LE_DEV_RANDOM
;
6316 BT_ERR("Ignoring invalid connection parameters");
6320 min
= le16_to_cpu(param
->min_interval
);
6321 max
= le16_to_cpu(param
->max_interval
);
6322 latency
= le16_to_cpu(param
->latency
);
6323 timeout
= le16_to_cpu(param
->timeout
);
6325 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6326 min
, max
, latency
, timeout
);
6328 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
6329 BT_ERR("Ignoring invalid connection parameters");
6333 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
6336 BT_ERR("Failed to add connection parameters");
6340 hci_param
->conn_min_interval
= min
;
6341 hci_param
->conn_max_interval
= max
;
6342 hci_param
->conn_latency
= latency
;
6343 hci_param
->supervision_timeout
= timeout
;
6346 hci_dev_unlock(hdev
);
6348 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0,
6352 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
6353 void *data
, u16 len
)
6355 struct mgmt_cp_set_external_config
*cp
= data
;
6359 BT_DBG("%s", hdev
->name
);
6361 if (hdev_is_powered(hdev
))
6362 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6363 MGMT_STATUS_REJECTED
);
6365 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
6366 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6367 MGMT_STATUS_INVALID_PARAMS
);
6369 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
6370 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6371 MGMT_STATUS_NOT_SUPPORTED
);
6376 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_EXT_CONFIGURED
);
6378 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_EXT_CONFIGURED
);
6380 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
6387 err
= new_options(hdev
, sk
);
6389 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) == is_configured(hdev
)) {
6390 mgmt_index_removed(hdev
);
6392 if (hci_dev_test_and_change_flag(hdev
, HCI_UNCONFIGURED
)) {
6393 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6394 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6396 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6398 set_bit(HCI_RAW
, &hdev
->flags
);
6399 mgmt_index_added(hdev
);
6404 hci_dev_unlock(hdev
);
6408 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
6409 void *data
, u16 len
)
6411 struct mgmt_cp_set_public_address
*cp
= data
;
6415 BT_DBG("%s", hdev
->name
);
6417 if (hdev_is_powered(hdev
))
6418 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6419 MGMT_STATUS_REJECTED
);
6421 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
6422 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6423 MGMT_STATUS_INVALID_PARAMS
);
6425 if (!hdev
->set_bdaddr
)
6426 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6427 MGMT_STATUS_NOT_SUPPORTED
);
6431 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
6432 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
6434 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
6441 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
6442 err
= new_options(hdev
, sk
);
6444 if (is_configured(hdev
)) {
6445 mgmt_index_removed(hdev
);
6447 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
6449 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6450 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6452 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6456 hci_dev_unlock(hdev
);
/* Append one EIR/AD structure (length octet, type octet, payload) to the
 * buffer at offset eir_len and return the new total length. The caller
 * must ensure the buffer has room for data_len + 2 more bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	u8 *p = eir + eir_len;

	/* The length octet counts the type octet plus the payload. */
	p[0] = sizeof(type) + data_len;
	p[1] = type;
	memcpy(p + 2, data, data_len);

	return eir_len + data_len + 2;
}
6471 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
6472 u16 opcode
, struct sk_buff
*skb
)
6474 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
6475 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
6476 u8
*h192
, *r192
, *h256
, *r256
;
6477 struct mgmt_pending_cmd
*cmd
;
6481 BT_DBG("%s status %u", hdev
->name
, status
);
6483 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
6487 mgmt_cp
= cmd
->param
;
6490 status
= mgmt_status(status
);
6497 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
6498 struct hci_rp_read_local_oob_data
*rp
;
6500 if (skb
->len
!= sizeof(*rp
)) {
6501 status
= MGMT_STATUS_FAILED
;
6504 status
= MGMT_STATUS_SUCCESS
;
6505 rp
= (void *)skb
->data
;
6507 eir_len
= 5 + 18 + 18;
6514 struct hci_rp_read_local_oob_ext_data
*rp
;
6516 if (skb
->len
!= sizeof(*rp
)) {
6517 status
= MGMT_STATUS_FAILED
;
6520 status
= MGMT_STATUS_SUCCESS
;
6521 rp
= (void *)skb
->data
;
6523 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
6524 eir_len
= 5 + 18 + 18;
6528 eir_len
= 5 + 18 + 18 + 18 + 18;
6538 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
6545 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
6546 hdev
->dev_class
, 3);
6549 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6550 EIR_SSP_HASH_C192
, h192
, 16);
6551 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6552 EIR_SSP_RAND_R192
, r192
, 16);
6556 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6557 EIR_SSP_HASH_C256
, h256
, 16);
6558 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6559 EIR_SSP_RAND_R256
, r256
, 16);
6563 mgmt_rp
->type
= mgmt_cp
->type
;
6564 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
6566 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
6567 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
6568 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
6569 if (err
< 0 || status
)
6572 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6574 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6575 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
6576 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
6579 mgmt_pending_remove(cmd
);
6582 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
6583 struct mgmt_cp_read_local_oob_ext_data
*cp
)
6585 struct mgmt_pending_cmd
*cmd
;
6586 struct hci_request req
;
6589 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
6594 hci_req_init(&req
, hdev
);
6596 if (bredr_sc_enabled(hdev
))
6597 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
6599 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
6601 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
6603 mgmt_pending_remove(cmd
);
6610 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
6611 void *data
, u16 data_len
)
6613 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
6614 struct mgmt_rp_read_local_oob_ext_data
*rp
;
6617 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
6620 BT_DBG("%s", hdev
->name
);
6622 if (hdev_is_powered(hdev
)) {
6624 case BIT(BDADDR_BREDR
):
6625 status
= mgmt_bredr_support(hdev
);
6631 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6632 status
= mgmt_le_support(hdev
);
6636 eir_len
= 9 + 3 + 18 + 18 + 3;
6639 status
= MGMT_STATUS_INVALID_PARAMS
;
6644 status
= MGMT_STATUS_NOT_POWERED
;
6648 rp_len
= sizeof(*rp
) + eir_len
;
6649 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6660 case BIT(BDADDR_BREDR
):
6661 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
6662 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
6663 hci_dev_unlock(hdev
);
6667 status
= MGMT_STATUS_FAILED
;
6670 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6672 hdev
->dev_class
, 3);
6675 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6676 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
6677 smp_generate_oob(hdev
, hash
, rand
) < 0) {
6678 hci_dev_unlock(hdev
);
6679 status
= MGMT_STATUS_FAILED
;
6683 /* This should return the active RPA, but since the RPA
6684 * is only programmed on demand, it is really hard to fill
6685 * this in at the moment. For now disallow retrieving
6686 * local out-of-band data when privacy is in use.
6688 * Returning the identity address will not help here since
6689 * pairing happens before the identity resolving key is
6690 * known and thus the connection establishment happens
6691 * based on the RPA and not the identity address.
6693 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
6694 hci_dev_unlock(hdev
);
6695 status
= MGMT_STATUS_REJECTED
;
6699 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
6700 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
6701 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
6702 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
6703 memcpy(addr
, &hdev
->static_addr
, 6);
6706 memcpy(addr
, &hdev
->bdaddr
, 6);
6710 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
6711 addr
, sizeof(addr
));
6713 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
6718 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
6719 &role
, sizeof(role
));
6721 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
6722 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6724 hash
, sizeof(hash
));
6726 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6728 rand
, sizeof(rand
));
6731 flags
= get_adv_discov_flags(hdev
);
6733 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
6734 flags
|= LE_AD_NO_BREDR
;
6736 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
6737 &flags
, sizeof(flags
));
6741 hci_dev_unlock(hdev
);
6743 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6745 status
= MGMT_STATUS_SUCCESS
;
6748 rp
->type
= cp
->type
;
6749 rp
->eir_len
= cpu_to_le16(eir_len
);
6751 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
6752 status
, rp
, sizeof(*rp
) + eir_len
);
6753 if (err
< 0 || status
)
6756 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6757 rp
, sizeof(*rp
) + eir_len
,
6758 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
6766 static u32
get_supported_adv_flags(struct hci_dev
*hdev
)
6770 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
6771 flags
|= MGMT_ADV_FLAG_DISCOV
;
6772 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
6773 flags
|= MGMT_ADV_FLAG_MANAGED_FLAGS
;
6775 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
)
6776 flags
|= MGMT_ADV_FLAG_TX_POWER
;
6781 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
6782 void *data
, u16 data_len
)
6784 struct mgmt_rp_read_adv_features
*rp
;
6788 u32 supported_flags
;
6790 BT_DBG("%s", hdev
->name
);
6792 if (!lmp_le_capable(hdev
))
6793 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6794 MGMT_STATUS_REJECTED
);
6798 rp_len
= sizeof(*rp
);
6800 /* Currently only one instance is supported, so just add 1 to the
6803 instance
= hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6807 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6809 hci_dev_unlock(hdev
);
6813 supported_flags
= get_supported_adv_flags(hdev
);
6815 rp
->supported_flags
= cpu_to_le32(supported_flags
);
6816 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
6817 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
6818 rp
->max_instances
= 1;
6820 /* Currently only one instance is supported, so simply return the
6821 * current instance number.
6824 rp
->num_instances
= 1;
6825 rp
->instance
[0] = 1;
6827 rp
->num_instances
= 0;
6830 hci_dev_unlock(hdev
);
6832 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6833 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
6840 static bool tlv_data_is_valid(struct hci_dev
*hdev
, u32 adv_flags
, u8
*data
,
6841 u8 len
, bool is_adv_data
)
6843 u8 max_len
= HCI_MAX_AD_LENGTH
;
6845 bool flags_managed
= false;
6846 bool tx_power_managed
= false;
6847 u32 flags_params
= MGMT_ADV_FLAG_DISCOV
| MGMT_ADV_FLAG_LIMITED_DISCOV
|
6848 MGMT_ADV_FLAG_MANAGED_FLAGS
;
6850 if (is_adv_data
&& (adv_flags
& flags_params
)) {
6851 flags_managed
= true;
6855 if (is_adv_data
&& (adv_flags
& MGMT_ADV_FLAG_TX_POWER
)) {
6856 tx_power_managed
= true;
6863 /* Make sure that the data is correctly formatted. */
6864 for (i
= 0, cur_len
= 0; i
< len
; i
+= (cur_len
+ 1)) {
6867 if (flags_managed
&& data
[i
+ 1] == EIR_FLAGS
)
6870 if (tx_power_managed
&& data
[i
+ 1] == EIR_TX_POWER
)
6873 /* If the current field length would exceed the total data
6874 * length, then it's invalid.
6876 if (i
+ cur_len
>= len
)
6883 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
6886 struct mgmt_pending_cmd
*cmd
;
6887 struct mgmt_rp_add_advertising rp
;
6889 BT_DBG("status %d", status
);
6893 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
6896 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6897 memset(&hdev
->adv_instance
, 0, sizeof(hdev
->adv_instance
));
6898 advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, 1);
6907 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6908 mgmt_status(status
));
6910 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6911 mgmt_status(status
), &rp
, sizeof(rp
));
6913 mgmt_pending_remove(cmd
);
6916 hci_dev_unlock(hdev
);
6919 static void adv_timeout_expired(struct work_struct
*work
)
6921 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
6922 adv_instance
.timeout_exp
.work
);
6924 hdev
->adv_instance
.timeout
= 0;
6927 clear_adv_instance(hdev
);
6928 hci_dev_unlock(hdev
);
6931 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
6932 void *data
, u16 data_len
)
6934 struct mgmt_cp_add_advertising
*cp
= data
;
6935 struct mgmt_rp_add_advertising rp
;
6937 u32 supported_flags
;
6941 struct mgmt_pending_cmd
*cmd
;
6942 struct hci_request req
;
6944 BT_DBG("%s", hdev
->name
);
6946 status
= mgmt_le_support(hdev
);
6948 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6951 flags
= __le32_to_cpu(cp
->flags
);
6952 timeout
= __le16_to_cpu(cp
->timeout
);
6954 /* The current implementation only supports adding one instance and only
6955 * a subset of the specified flags.
6957 supported_flags
= get_supported_adv_flags(hdev
);
6958 if (cp
->instance
!= 0x01 || (flags
& ~supported_flags
))
6959 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6960 MGMT_STATUS_INVALID_PARAMS
);
6964 if (timeout
&& !hdev_is_powered(hdev
)) {
6965 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6966 MGMT_STATUS_REJECTED
);
6970 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
6971 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
6972 pending_find(MGMT_OP_SET_LE
, hdev
)) {
6973 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6978 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
6979 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
6980 cp
->scan_rsp_len
, false)) {
6981 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6982 MGMT_STATUS_INVALID_PARAMS
);
6986 INIT_DELAYED_WORK(&hdev
->adv_instance
.timeout_exp
, adv_timeout_expired
);
6988 hdev
->adv_instance
.flags
= flags
;
6989 hdev
->adv_instance
.adv_data_len
= cp
->adv_data_len
;
6990 hdev
->adv_instance
.scan_rsp_len
= cp
->scan_rsp_len
;
6992 if (cp
->adv_data_len
)
6993 memcpy(hdev
->adv_instance
.adv_data
, cp
->data
, cp
->adv_data_len
);
6995 if (cp
->scan_rsp_len
)
6996 memcpy(hdev
->adv_instance
.scan_rsp_data
,
6997 cp
->data
+ cp
->adv_data_len
, cp
->scan_rsp_len
);
6999 if (hdev
->adv_instance
.timeout
)
7000 cancel_delayed_work(&hdev
->adv_instance
.timeout_exp
);
7002 hdev
->adv_instance
.timeout
= timeout
;
7005 queue_delayed_work(hdev
->workqueue
,
7006 &hdev
->adv_instance
.timeout_exp
,
7007 msecs_to_jiffies(timeout
* 1000));
7009 if (!hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
7010 advertising_added(sk
, hdev
, 1);
7012 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7013 * we have no HCI communication to make. Simply return.
7015 if (!hdev_is_powered(hdev
) ||
7016 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
7018 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7019 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7023 /* We're good to go, update advertising data, parameters, and start
7026 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
7033 hci_req_init(&req
, hdev
);
7035 update_adv_data(&req
);
7036 update_scan_rsp_data(&req
);
7037 enable_advertising(&req
);
7039 err
= hci_req_run(&req
, add_advertising_complete
);
7041 mgmt_pending_remove(cmd
);
7044 hci_dev_unlock(hdev
);
7049 static void remove_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7052 struct mgmt_pending_cmd
*cmd
;
7053 struct mgmt_rp_remove_advertising rp
;
7055 BT_DBG("status %d", status
);
7059 /* A failure status here only means that we failed to disable
7060 * advertising. Otherwise, the advertising instance has been removed,
7061 * so report success.
7063 cmd
= pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
);
7069 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, MGMT_STATUS_SUCCESS
,
7071 mgmt_pending_remove(cmd
);
7074 hci_dev_unlock(hdev
);
7077 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7078 void *data
, u16 data_len
)
7080 struct mgmt_cp_remove_advertising
*cp
= data
;
7081 struct mgmt_rp_remove_advertising rp
;
7083 struct mgmt_pending_cmd
*cmd
;
7084 struct hci_request req
;
7086 BT_DBG("%s", hdev
->name
);
7088 /* The current implementation only allows modifying instance no 1. A
7089 * value of 0 indicates that all instances should be cleared.
7091 if (cp
->instance
> 1)
7092 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7093 MGMT_STATUS_INVALID_PARAMS
);
7097 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7098 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7099 pending_find(MGMT_OP_SET_LE
, hdev
)) {
7100 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7105 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
)) {
7106 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7107 MGMT_STATUS_INVALID_PARAMS
);
7111 if (hdev
->adv_instance
.timeout
)
7112 cancel_delayed_work(&hdev
->adv_instance
.timeout_exp
);
7114 memset(&hdev
->adv_instance
, 0, sizeof(hdev
->adv_instance
));
7116 advertising_removed(sk
, hdev
, 1);
7118 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
7120 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7121 * we have no HCI communication to make. Simply return.
7123 if (!hdev_is_powered(hdev
) ||
7124 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
7126 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7127 MGMT_OP_REMOVE_ADVERTISING
,
7128 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7132 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
7139 hci_req_init(&req
, hdev
);
7140 disable_advertising(&req
);
7142 err
= hci_req_run(&req
, remove_advertising_complete
);
7144 mgmt_pending_remove(cmd
);
7147 hci_dev_unlock(hdev
);
7152 static const struct hci_mgmt_handler mgmt_handlers
[] = {
7153 { NULL
}, /* 0x0000 (no command) */
7154 { read_version
, MGMT_READ_VERSION_SIZE
,
7156 HCI_MGMT_UNTRUSTED
},
7157 { read_commands
, MGMT_READ_COMMANDS_SIZE
,
7159 HCI_MGMT_UNTRUSTED
},
7160 { read_index_list
, MGMT_READ_INDEX_LIST_SIZE
,
7162 HCI_MGMT_UNTRUSTED
},
7163 { read_controller_info
, MGMT_READ_INFO_SIZE
,
7164 HCI_MGMT_UNTRUSTED
},
7165 { set_powered
, MGMT_SETTING_SIZE
},
7166 { set_discoverable
, MGMT_SET_DISCOVERABLE_SIZE
},
7167 { set_connectable
, MGMT_SETTING_SIZE
},
7168 { set_fast_connectable
, MGMT_SETTING_SIZE
},
7169 { set_bondable
, MGMT_SETTING_SIZE
},
7170 { set_link_security
, MGMT_SETTING_SIZE
},
7171 { set_ssp
, MGMT_SETTING_SIZE
},
7172 { set_hs
, MGMT_SETTING_SIZE
},
7173 { set_le
, MGMT_SETTING_SIZE
},
7174 { set_dev_class
, MGMT_SET_DEV_CLASS_SIZE
},
7175 { set_local_name
, MGMT_SET_LOCAL_NAME_SIZE
},
7176 { add_uuid
, MGMT_ADD_UUID_SIZE
},
7177 { remove_uuid
, MGMT_REMOVE_UUID_SIZE
},
7178 { load_link_keys
, MGMT_LOAD_LINK_KEYS_SIZE
,
7180 { load_long_term_keys
, MGMT_LOAD_LONG_TERM_KEYS_SIZE
,
7182 { disconnect
, MGMT_DISCONNECT_SIZE
},
7183 { get_connections
, MGMT_GET_CONNECTIONS_SIZE
},
7184 { pin_code_reply
, MGMT_PIN_CODE_REPLY_SIZE
},
7185 { pin_code_neg_reply
, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
7186 { set_io_capability
, MGMT_SET_IO_CAPABILITY_SIZE
},
7187 { pair_device
, MGMT_PAIR_DEVICE_SIZE
},
7188 { cancel_pair_device
, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
7189 { unpair_device
, MGMT_UNPAIR_DEVICE_SIZE
},
7190 { user_confirm_reply
, MGMT_USER_CONFIRM_REPLY_SIZE
},
7191 { user_confirm_neg_reply
, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
7192 { user_passkey_reply
, MGMT_USER_PASSKEY_REPLY_SIZE
},
7193 { user_passkey_neg_reply
, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
7194 { read_local_oob_data
, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
7195 { add_remote_oob_data
, MGMT_ADD_REMOTE_OOB_DATA_SIZE
,
7197 { remove_remote_oob_data
, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
7198 { start_discovery
, MGMT_START_DISCOVERY_SIZE
},
7199 { stop_discovery
, MGMT_STOP_DISCOVERY_SIZE
},
7200 { confirm_name
, MGMT_CONFIRM_NAME_SIZE
},
7201 { block_device
, MGMT_BLOCK_DEVICE_SIZE
},
7202 { unblock_device
, MGMT_UNBLOCK_DEVICE_SIZE
},
7203 { set_device_id
, MGMT_SET_DEVICE_ID_SIZE
},
7204 { set_advertising
, MGMT_SETTING_SIZE
},
7205 { set_bredr
, MGMT_SETTING_SIZE
},
7206 { set_static_address
, MGMT_SET_STATIC_ADDRESS_SIZE
},
7207 { set_scan_params
, MGMT_SET_SCAN_PARAMS_SIZE
},
7208 { set_secure_conn
, MGMT_SETTING_SIZE
},
7209 { set_debug_keys
, MGMT_SETTING_SIZE
},
7210 { set_privacy
, MGMT_SET_PRIVACY_SIZE
},
7211 { load_irks
, MGMT_LOAD_IRKS_SIZE
,
7213 { get_conn_info
, MGMT_GET_CONN_INFO_SIZE
},
7214 { get_clock_info
, MGMT_GET_CLOCK_INFO_SIZE
},
7215 { add_device
, MGMT_ADD_DEVICE_SIZE
},
7216 { remove_device
, MGMT_REMOVE_DEVICE_SIZE
},
7217 { load_conn_param
, MGMT_LOAD_CONN_PARAM_SIZE
,
7219 { read_unconf_index_list
, MGMT_READ_UNCONF_INDEX_LIST_SIZE
,
7221 HCI_MGMT_UNTRUSTED
},
7222 { read_config_info
, MGMT_READ_CONFIG_INFO_SIZE
,
7223 HCI_MGMT_UNCONFIGURED
|
7224 HCI_MGMT_UNTRUSTED
},
7225 { set_external_config
, MGMT_SET_EXTERNAL_CONFIG_SIZE
,
7226 HCI_MGMT_UNCONFIGURED
},
7227 { set_public_address
, MGMT_SET_PUBLIC_ADDRESS_SIZE
,
7228 HCI_MGMT_UNCONFIGURED
},
7229 { start_service_discovery
, MGMT_START_SERVICE_DISCOVERY_SIZE
,
7231 { read_local_oob_ext_data
, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE
},
7232 { read_ext_index_list
, MGMT_READ_EXT_INDEX_LIST_SIZE
,
7234 HCI_MGMT_UNTRUSTED
},
7235 { read_adv_features
, MGMT_READ_ADV_FEATURES_SIZE
},
7236 { add_advertising
, MGMT_ADD_ADVERTISING_SIZE
,
7238 { remove_advertising
, MGMT_REMOVE_ADVERTISING_SIZE
},
7241 void mgmt_index_added(struct hci_dev
*hdev
)
7243 struct mgmt_ev_ext_index ev
;
7245 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
7248 switch (hdev
->dev_type
) {
7250 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
7251 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
,
7252 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
7255 mgmt_index_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0,
7256 HCI_MGMT_INDEX_EVENTS
);
7269 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED
, hdev
, &ev
, sizeof(ev
),
7270 HCI_MGMT_EXT_INDEX_EVENTS
);
7273 void mgmt_index_removed(struct hci_dev
*hdev
)
7275 struct mgmt_ev_ext_index ev
;
7276 u8 status
= MGMT_STATUS_INVALID_INDEX
;
7278 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
7281 switch (hdev
->dev_type
) {
7283 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
7285 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
7286 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
,
7287 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
7290 mgmt_index_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0,
7291 HCI_MGMT_INDEX_EVENTS
);
7304 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED
, hdev
, &ev
, sizeof(ev
),
7305 HCI_MGMT_EXT_INDEX_EVENTS
);
7308 /* This function requires the caller holds hdev->lock */
7309 static void restart_le_actions(struct hci_request
*req
)
7311 struct hci_dev
*hdev
= req
->hdev
;
7312 struct hci_conn_params
*p
;
7314 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
7315 /* Needed for AUTO_OFF case where might not "really"
7316 * have been powered off.
7318 list_del_init(&p
->action
);
7320 switch (p
->auto_connect
) {
7321 case HCI_AUTO_CONN_DIRECT
:
7322 case HCI_AUTO_CONN_ALWAYS
:
7323 list_add(&p
->action
, &hdev
->pend_le_conns
);
7325 case HCI_AUTO_CONN_REPORT
:
7326 list_add(&p
->action
, &hdev
->pend_le_reports
);
7333 __hci_update_background_scan(req
);
7336 static void powered_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
7338 struct cmd_lookup match
= { NULL
, hdev
};
7340 BT_DBG("status 0x%02x", status
);
7343 /* Register the available SMP channels (BR/EDR and LE) only
7344 * when successfully powering on the controller. This late
7345 * registration is required so that LE SMP can clearly
7346 * decide if the public address or static address is used.
7353 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
7355 new_settings(hdev
, match
.sk
);
7357 hci_dev_unlock(hdev
);
7363 static int powered_update_hci(struct hci_dev
*hdev
)
7365 struct hci_request req
;
7368 hci_req_init(&req
, hdev
);
7370 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
) &&
7371 !lmp_host_ssp_capable(hdev
)) {
7374 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, sizeof(mode
), &mode
);
7376 if (bredr_sc_enabled(hdev
) && !lmp_host_sc_capable(hdev
)) {
7379 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
,
7380 sizeof(support
), &support
);
7384 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
7385 lmp_bredr_capable(hdev
)) {
7386 struct hci_cp_write_le_host_supported cp
;
7391 /* Check first if we already have the right
7392 * host state (host features set)
7394 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
7395 cp
.simul
!= lmp_host_le_br_capable(hdev
))
7396 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
7400 if (lmp_le_capable(hdev
)) {
7401 /* Make sure the controller has a good default for
7402 * advertising data. This also applies to the case
7403 * where BR/EDR was toggled during the AUTO_OFF phase.
7405 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
7406 update_adv_data(&req
);
7407 update_scan_rsp_data(&req
);
7410 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
7411 hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
7412 enable_advertising(&req
);
7414 restart_le_actions(&req
);
7417 link_sec
= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
);
7418 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
7419 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
7420 sizeof(link_sec
), &link_sec
);
7422 if (lmp_bredr_capable(hdev
)) {
7423 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
7424 write_fast_connectable(&req
, true);
7426 write_fast_connectable(&req
, false);
7427 __hci_update_page_scan(&req
);
7433 return hci_req_run(&req
, powered_complete
);
7436 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
7438 struct cmd_lookup match
= { NULL
, hdev
};
7439 u8 status
, zero_cod
[] = { 0, 0, 0 };
7442 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
7446 if (powered_update_hci(hdev
) == 0)
7449 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
7454 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
7456 /* If the power off is because of hdev unregistration let
7457 * use the appropriate INVALID_INDEX status. Otherwise use
7458 * NOT_POWERED. We cover both scenarios here since later in
7459 * mgmt_index_removed() any hci_conn callbacks will have already
7460 * been triggered, potentially causing misleading DISCONNECTED
7463 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
7464 status
= MGMT_STATUS_INVALID_INDEX
;
7466 status
= MGMT_STATUS_NOT_POWERED
;
7468 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
7470 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
7471 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
7472 zero_cod
, sizeof(zero_cod
), NULL
);
7475 err
= new_settings(hdev
, match
.sk
);
7483 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
7485 struct mgmt_pending_cmd
*cmd
;
7488 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
7492 if (err
== -ERFKILL
)
7493 status
= MGMT_STATUS_RFKILLED
;
7495 status
= MGMT_STATUS_FAILED
;
7497 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
7499 mgmt_pending_remove(cmd
);
7502 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
7504 struct hci_request req
;
7508 /* When discoverable timeout triggers, then just make sure
7509 * the limited discoverable flag is cleared. Even in the case
7510 * of a timeout triggered from general discoverable, it is
7511 * safe to unconditionally clear the flag.
7513 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
7514 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
7516 hci_req_init(&req
, hdev
);
7517 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
7518 u8 scan
= SCAN_PAGE
;
7519 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
7520 sizeof(scan
), &scan
);
7524 /* Advertising instances don't use the global discoverable setting, so
7525 * only update AD if advertising was enabled using Set Advertising.
7527 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7528 update_adv_data(&req
);
7530 hci_req_run(&req
, NULL
);
7532 hdev
->discov_timeout
= 0;
7534 new_settings(hdev
, NULL
);
7536 hci_dev_unlock(hdev
);
7539 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
7542 struct mgmt_ev_new_link_key ev
;
7544 memset(&ev
, 0, sizeof(ev
));
7546 ev
.store_hint
= persistent
;
7547 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
7548 ev
.key
.addr
.type
= BDADDR_BREDR
;
7549 ev
.key
.type
= key
->type
;
7550 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
7551 ev
.key
.pin_len
= key
->pin_len
;
7553 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
7556 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
7558 switch (ltk
->type
) {
7561 if (ltk
->authenticated
)
7562 return MGMT_LTK_AUTHENTICATED
;
7563 return MGMT_LTK_UNAUTHENTICATED
;
7565 if (ltk
->authenticated
)
7566 return MGMT_LTK_P256_AUTH
;
7567 return MGMT_LTK_P256_UNAUTH
;
7568 case SMP_LTK_P256_DEBUG
:
7569 return MGMT_LTK_P256_DEBUG
;
7572 return MGMT_LTK_UNAUTHENTICATED
;
7575 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
7577 struct mgmt_ev_new_long_term_key ev
;
7579 memset(&ev
, 0, sizeof(ev
));
7581 /* Devices using resolvable or non-resolvable random addresses
7582 * without providing an indentity resolving key don't require
7583 * to store long term keys. Their addresses will change the
7586 * Only when a remote device provides an identity address
7587 * make sure the long term key is stored. If the remote
7588 * identity is known, the long term keys are internally
7589 * mapped to the identity address. So allow static random
7590 * and public addresses here.
7592 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
7593 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
7594 ev
.store_hint
= 0x00;
7596 ev
.store_hint
= persistent
;
7598 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
7599 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
7600 ev
.key
.type
= mgmt_ltk_type(key
);
7601 ev
.key
.enc_size
= key
->enc_size
;
7602 ev
.key
.ediv
= key
->ediv
;
7603 ev
.key
.rand
= key
->rand
;
7605 if (key
->type
== SMP_LTK
)
7608 memcpy(ev
.key
.val
, key
->val
, sizeof(key
->val
));
7610 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
7613 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
)
7615 struct mgmt_ev_new_irk ev
;
7617 memset(&ev
, 0, sizeof(ev
));
7619 /* For identity resolving keys from devices that are already
7620 * using a public address or static random address, do not
7621 * ask for storing this key. The identity resolving key really
7622 * is only mandatory for devices using resovlable random
7625 * Storing all identity resolving keys has the downside that
7626 * they will be also loaded on next boot of they system. More
7627 * identity resolving keys, means more time during scanning is
7628 * needed to actually resolve these addresses.
7630 if (bacmp(&irk
->rpa
, BDADDR_ANY
))
7631 ev
.store_hint
= 0x01;
7633 ev
.store_hint
= 0x00;
7635 bacpy(&ev
.rpa
, &irk
->rpa
);
7636 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
7637 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
7638 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
7640 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
7643 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
7646 struct mgmt_ev_new_csrk ev
;
7648 memset(&ev
, 0, sizeof(ev
));
7650 /* Devices using resolvable or non-resolvable random addresses
7651 * without providing an indentity resolving key don't require
7652 * to store signature resolving keys. Their addresses will change
7653 * the next time around.
7655 * Only when a remote device provides an identity address
7656 * make sure the signature resolving key is stored. So allow
7657 * static random and public addresses here.
7659 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
7660 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
7661 ev
.store_hint
= 0x00;
7663 ev
.store_hint
= persistent
;
7665 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
7666 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
7667 ev
.key
.type
= csrk
->type
;
7668 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
7670 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
7673 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7674 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
7675 u16 max_interval
, u16 latency
, u16 timeout
)
7677 struct mgmt_ev_new_conn_param ev
;
7679 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
7682 memset(&ev
, 0, sizeof(ev
));
7683 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7684 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
7685 ev
.store_hint
= store_hint
;
7686 ev
.min_interval
= cpu_to_le16(min_interval
);
7687 ev
.max_interval
= cpu_to_le16(max_interval
);
7688 ev
.latency
= cpu_to_le16(latency
);
7689 ev
.timeout
= cpu_to_le16(timeout
);
7691 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
7694 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
7695 u32 flags
, u8
*name
, u8 name_len
)
7698 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
7701 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
7702 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
7704 ev
->flags
= __cpu_to_le32(flags
);
7706 /* We must ensure that the EIR Data fields are ordered and
7707 * unique. Keep it simple for now and avoid the problem by not
7708 * adding any BR/EDR data to the LE adv.
7710 if (conn
->le_adv_data_len
> 0) {
7711 memcpy(&ev
->eir
[eir_len
],
7712 conn
->le_adv_data
, conn
->le_adv_data_len
);
7713 eir_len
= conn
->le_adv_data_len
;
7716 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
7719 if (memcmp(conn
->dev_class
, "\0\0\0", 3) != 0)
7720 eir_len
= eir_append_data(ev
->eir
, eir_len
,
7722 conn
->dev_class
, 3);
7725 ev
->eir_len
= cpu_to_le16(eir_len
);
7727 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
7728 sizeof(*ev
) + eir_len
, NULL
);
7731 static void disconnect_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
7733 struct sock
**sk
= data
;
7735 cmd
->cmd_complete(cmd
, 0);
7740 mgmt_pending_remove(cmd
);
7743 static void unpair_device_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
7745 struct hci_dev
*hdev
= data
;
7746 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
7748 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
7750 cmd
->cmd_complete(cmd
, 0);
7751 mgmt_pending_remove(cmd
);
7754 bool mgmt_powering_down(struct hci_dev
*hdev
)
7756 struct mgmt_pending_cmd
*cmd
;
7757 struct mgmt_mode
*cp
;
7759 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
7770 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7771 u8 link_type
, u8 addr_type
, u8 reason
,
7772 bool mgmt_connected
)
7774 struct mgmt_ev_device_disconnected ev
;
7775 struct sock
*sk
= NULL
;
7777 /* The connection is still in hci_conn_hash so test for 1
7778 * instead of 0 to know if this is the last one.
7780 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
7781 cancel_delayed_work(&hdev
->power_off
);
7782 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
7785 if (!mgmt_connected
)
7788 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
7791 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
7793 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7794 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7797 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
7802 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
7806 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7807 u8 link_type
, u8 addr_type
, u8 status
)
7809 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
7810 struct mgmt_cp_disconnect
*cp
;
7811 struct mgmt_pending_cmd
*cmd
;
7813 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
7816 cmd
= pending_find(MGMT_OP_DISCONNECT
, hdev
);
7822 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
7825 if (cp
->addr
.type
!= bdaddr_type
)
7828 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7829 mgmt_pending_remove(cmd
);
7832 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7833 u8 addr_type
, u8 status
)
7835 struct mgmt_ev_connect_failed ev
;
7837 /* The connection is still in hci_conn_hash so test for 1
7838 * instead of 0 to know if this is the last one.
7840 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
7841 cancel_delayed_work(&hdev
->power_off
);
7842 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
7845 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7846 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7847 ev
.status
= mgmt_status(status
);
7849 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
7852 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
7854 struct mgmt_ev_pin_code_request ev
;
7856 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7857 ev
.addr
.type
= BDADDR_BREDR
;
7860 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
7863 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7866 struct mgmt_pending_cmd
*cmd
;
7868 cmd
= pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
7872 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7873 mgmt_pending_remove(cmd
);
7876 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7879 struct mgmt_pending_cmd
*cmd
;
7881 cmd
= pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
7885 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7886 mgmt_pending_remove(cmd
);
7889 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7890 u8 link_type
, u8 addr_type
, u32 value
,
7893 struct mgmt_ev_user_confirm_request ev
;
7895 BT_DBG("%s", hdev
->name
);
7897 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7898 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7899 ev
.confirm_hint
= confirm_hint
;
7900 ev
.value
= cpu_to_le32(value
);
7902 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
7906 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7907 u8 link_type
, u8 addr_type
)
7909 struct mgmt_ev_user_passkey_request ev
;
7911 BT_DBG("%s", hdev
->name
);
7913 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7914 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7916 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
7920 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7921 u8 link_type
, u8 addr_type
, u8 status
,
7924 struct mgmt_pending_cmd
*cmd
;
7926 cmd
= pending_find(opcode
, hdev
);
7930 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7931 mgmt_pending_remove(cmd
);
7936 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7937 u8 link_type
, u8 addr_type
, u8 status
)
7939 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7940 status
, MGMT_OP_USER_CONFIRM_REPLY
);
7943 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7944 u8 link_type
, u8 addr_type
, u8 status
)
7946 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7948 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
7951 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7952 u8 link_type
, u8 addr_type
, u8 status
)
7954 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7955 status
, MGMT_OP_USER_PASSKEY_REPLY
);
7958 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7959 u8 link_type
, u8 addr_type
, u8 status
)
7961 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7963 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
7966 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7967 u8 link_type
, u8 addr_type
, u32 passkey
,
7970 struct mgmt_ev_passkey_notify ev
;
7972 BT_DBG("%s", hdev
->name
);
7974 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7975 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7976 ev
.passkey
= __cpu_to_le32(passkey
);
7977 ev
.entered
= entered
;
7979 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
7982 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
7984 struct mgmt_ev_auth_failed ev
;
7985 struct mgmt_pending_cmd
*cmd
;
7986 u8 status
= mgmt_status(hci_status
);
7988 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
7989 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
7992 cmd
= find_pairing(conn
);
7994 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
7995 cmd
? cmd
->sk
: NULL
);
7998 cmd
->cmd_complete(cmd
, status
);
7999 mgmt_pending_remove(cmd
);
8003 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
8005 struct cmd_lookup match
= { NULL
, hdev
};
8009 u8 mgmt_err
= mgmt_status(status
);
8010 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
8011 cmd_status_rsp
, &mgmt_err
);
8015 if (test_bit(HCI_AUTH
, &hdev
->flags
))
8016 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_LINK_SECURITY
);
8018 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_LINK_SECURITY
);
8020 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
8024 new_settings(hdev
, match
.sk
);
8030 static void clear_eir(struct hci_request
*req
)
8032 struct hci_dev
*hdev
= req
->hdev
;
8033 struct hci_cp_write_eir cp
;
8035 if (!lmp_ext_inq_capable(hdev
))
8038 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
8040 memset(&cp
, 0, sizeof(cp
));
8042 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
8045 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
8047 struct cmd_lookup match
= { NULL
, hdev
};
8048 struct hci_request req
;
8049 bool changed
= false;
8052 u8 mgmt_err
= mgmt_status(status
);
8054 if (enable
&& hci_dev_test_and_clear_flag(hdev
,
8056 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
8057 new_settings(hdev
, NULL
);
8060 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
8066 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
8068 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_SSP_ENABLED
);
8070 changed
= hci_dev_test_and_clear_flag(hdev
,
8073 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
8076 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
8079 new_settings(hdev
, match
.sk
);
8084 hci_req_init(&req
, hdev
);
8086 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
8087 if (hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
8088 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
8089 sizeof(enable
), &enable
);
8095 hci_req_run(&req
, NULL
);
8098 static void sk_lookup(struct mgmt_pending_cmd
*cmd
, void *data
)
8100 struct cmd_lookup
*match
= data
;
8102 if (match
->sk
== NULL
) {
8103 match
->sk
= cmd
->sk
;
8104 sock_hold(match
->sk
);
8108 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
8111 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
8113 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
8114 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
8115 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
8118 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
8119 dev_class
, 3, NULL
);
8125 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
8127 struct mgmt_cp_set_local_name ev
;
8128 struct mgmt_pending_cmd
*cmd
;
8133 memset(&ev
, 0, sizeof(ev
));
8134 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
8135 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
8137 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
8139 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
8141 /* If this is a HCI command related to powering on the
8142 * HCI dev don't send any mgmt signals.
8144 if (pending_find(MGMT_OP_SET_POWERED
, hdev
))
8148 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
8149 cmd
? cmd
->sk
: NULL
);
8152 static inline bool has_uuid(u8
*uuid
, u16 uuid_count
, u8 (*uuids
)[16])
8156 for (i
= 0; i
< uuid_count
; i
++) {
8157 if (!memcmp(uuid
, uuids
[i
], 16))
8164 static bool eir_has_uuids(u8
*eir
, u16 eir_len
, u16 uuid_count
, u8 (*uuids
)[16])
8168 while (parsed
< eir_len
) {
8169 u8 field_len
= eir
[0];
8176 if (eir_len
- parsed
< field_len
+ 1)
8180 case EIR_UUID16_ALL
:
8181 case EIR_UUID16_SOME
:
8182 for (i
= 0; i
+ 3 <= field_len
; i
+= 2) {
8183 memcpy(uuid
, bluetooth_base_uuid
, 16);
8184 uuid
[13] = eir
[i
+ 3];
8185 uuid
[12] = eir
[i
+ 2];
8186 if (has_uuid(uuid
, uuid_count
, uuids
))
8190 case EIR_UUID32_ALL
:
8191 case EIR_UUID32_SOME
:
8192 for (i
= 0; i
+ 5 <= field_len
; i
+= 4) {
8193 memcpy(uuid
, bluetooth_base_uuid
, 16);
8194 uuid
[15] = eir
[i
+ 5];
8195 uuid
[14] = eir
[i
+ 4];
8196 uuid
[13] = eir
[i
+ 3];
8197 uuid
[12] = eir
[i
+ 2];
8198 if (has_uuid(uuid
, uuid_count
, uuids
))
8202 case EIR_UUID128_ALL
:
8203 case EIR_UUID128_SOME
:
8204 for (i
= 0; i
+ 17 <= field_len
; i
+= 16) {
8205 memcpy(uuid
, eir
+ i
+ 2, 16);
8206 if (has_uuid(uuid
, uuid_count
, uuids
))
8212 parsed
+= field_len
+ 1;
8213 eir
+= field_len
+ 1;
8219 static void restart_le_scan(struct hci_dev
*hdev
)
8221 /* If controller is not scanning we are done. */
8222 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
8225 if (time_after(jiffies
+ DISCOV_LE_RESTART_DELAY
,
8226 hdev
->discovery
.scan_start
+
8227 hdev
->discovery
.scan_duration
))
8230 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_restart
,
8231 DISCOV_LE_RESTART_DELAY
);
8234 static bool is_filter_match(struct hci_dev
*hdev
, s8 rssi
, u8
*eir
,
8235 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
8237 /* If a RSSI threshold has been specified, and
8238 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8239 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8240 * is set, let it through for further processing, as we might need to
8243 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8244 * the results are also dropped.
8246 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
8247 (rssi
== HCI_RSSI_INVALID
||
8248 (rssi
< hdev
->discovery
.rssi
&&
8249 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
))))
8252 if (hdev
->discovery
.uuid_count
!= 0) {
8253 /* If a list of UUIDs is provided in filter, results with no
8254 * matching UUID should be dropped.
8256 if (!eir_has_uuids(eir
, eir_len
, hdev
->discovery
.uuid_count
,
8257 hdev
->discovery
.uuids
) &&
8258 !eir_has_uuids(scan_rsp
, scan_rsp_len
,
8259 hdev
->discovery
.uuid_count
,
8260 hdev
->discovery
.uuids
))
8264 /* If duplicate filtering does not report RSSI changes, then restart
8265 * scanning to ensure updated result with updated RSSI values.
8267 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
)) {
8268 restart_le_scan(hdev
);
8270 /* Validate RSSI value against the RSSI threshold once more. */
8271 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
8272 rssi
< hdev
->discovery
.rssi
)
8279 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
8280 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
8281 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
8284 struct mgmt_ev_device_found
*ev
= (void *)buf
;
8287 /* Don't send events for a non-kernel initiated discovery. With
8288 * LE one exception is if we have pend_le_reports > 0 in which
8289 * case we're doing passive scanning and want these events.
8291 if (!hci_discovery_active(hdev
)) {
8292 if (link_type
== ACL_LINK
)
8294 if (link_type
== LE_LINK
&& list_empty(&hdev
->pend_le_reports
))
8298 if (hdev
->discovery
.result_filtering
) {
8299 /* We are using service discovery */
8300 if (!is_filter_match(hdev
, rssi
, eir
, eir_len
, scan_rsp
,
8305 /* Make sure that the buffer is big enough. The 5 extra bytes
8306 * are for the potential CoD field.
8308 if (sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5 > sizeof(buf
))
8311 memset(buf
, 0, sizeof(buf
));
8313 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8314 * RSSI value was reported as 0 when not available. This behavior
8315 * is kept when using device discovery. This is required for full
8316 * backwards compatibility with the API.
8318 * However when using service discovery, the value 127 will be
8319 * returned when the RSSI is not available.
8321 if (rssi
== HCI_RSSI_INVALID
&& !hdev
->discovery
.report_invalid_rssi
&&
8322 link_type
== ACL_LINK
)
8325 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
8326 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8328 ev
->flags
= cpu_to_le32(flags
);
8331 /* Copy EIR or advertising data into event */
8332 memcpy(ev
->eir
, eir
, eir_len
);
8334 if (dev_class
&& !eir_has_data_type(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
))
8335 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
8338 if (scan_rsp_len
> 0)
8339 /* Append scan response data to event */
8340 memcpy(ev
->eir
+ eir_len
, scan_rsp
, scan_rsp_len
);
8342 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
8343 ev_size
= sizeof(*ev
) + eir_len
+ scan_rsp_len
;
8345 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
8348 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
8349 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
8351 struct mgmt_ev_device_found
*ev
;
8352 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
8355 ev
= (struct mgmt_ev_device_found
*) buf
;
8357 memset(buf
, 0, sizeof(buf
));
8359 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
8360 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8363 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
8366 ev
->eir_len
= cpu_to_le16(eir_len
);
8368 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
8371 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
8373 struct mgmt_ev_discovering ev
;
8375 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
8377 memset(&ev
, 0, sizeof(ev
));
8378 ev
.type
= hdev
->discovery
.type
;
8379 ev
.discovering
= discovering
;
8381 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
8384 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
8386 BT_DBG("%s status %u", hdev
->name
, status
);
8389 void mgmt_reenable_advertising(struct hci_dev
*hdev
)
8391 struct hci_request req
;
8393 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
8394 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
8397 hci_req_init(&req
, hdev
);
8398 enable_advertising(&req
);
8399 hci_req_run(&req
, adv_enable_complete
);
8402 static struct hci_mgmt_chan chan
= {
8403 .channel
= HCI_CHANNEL_CONTROL
,
8404 .handler_count
= ARRAY_SIZE(mgmt_handlers
),
8405 .handlers
= mgmt_handlers
,
8406 .hdev_init
= mgmt_init_hdev
,
8411 return hci_mgmt_chan_register(&chan
);
8414 void mgmt_exit(void)
8416 hci_mgmt_chan_unregister(&chan
);