2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 23
46 static const u16 mgmt_commands
[] = {
47 MGMT_OP_READ_INDEX_LIST
,
50 MGMT_OP_SET_DISCOVERABLE
,
51 MGMT_OP_SET_CONNECTABLE
,
52 MGMT_OP_SET_FAST_CONNECTABLE
,
54 MGMT_OP_SET_LINK_SECURITY
,
58 MGMT_OP_SET_DEV_CLASS
,
59 MGMT_OP_SET_LOCAL_NAME
,
62 MGMT_OP_LOAD_LINK_KEYS
,
63 MGMT_OP_LOAD_LONG_TERM_KEYS
,
65 MGMT_OP_GET_CONNECTIONS
,
66 MGMT_OP_PIN_CODE_REPLY
,
67 MGMT_OP_PIN_CODE_NEG_REPLY
,
68 MGMT_OP_SET_IO_CAPABILITY
,
70 MGMT_OP_CANCEL_PAIR_DEVICE
,
71 MGMT_OP_UNPAIR_DEVICE
,
72 MGMT_OP_USER_CONFIRM_REPLY
,
73 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
74 MGMT_OP_USER_PASSKEY_REPLY
,
75 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
76 MGMT_OP_READ_LOCAL_OOB_DATA
,
77 MGMT_OP_ADD_REMOTE_OOB_DATA
,
78 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
79 MGMT_OP_START_DISCOVERY
,
80 MGMT_OP_STOP_DISCOVERY
,
83 MGMT_OP_UNBLOCK_DEVICE
,
84 MGMT_OP_SET_DEVICE_ID
,
85 MGMT_OP_SET_ADVERTISING
,
87 MGMT_OP_SET_STATIC_ADDRESS
,
88 MGMT_OP_SET_SCAN_PARAMS
,
89 MGMT_OP_SET_SECURE_CONN
,
90 MGMT_OP_SET_DEBUG_KEYS
,
93 MGMT_OP_GET_CONN_INFO
,
94 MGMT_OP_GET_CLOCK_INFO
,
96 MGMT_OP_REMOVE_DEVICE
,
97 MGMT_OP_LOAD_CONN_PARAM
,
98 MGMT_OP_READ_UNCONF_INDEX_LIST
,
99 MGMT_OP_READ_CONFIG_INFO
,
100 MGMT_OP_SET_EXTERNAL_CONFIG
,
101 MGMT_OP_SET_PUBLIC_ADDRESS
,
102 MGMT_OP_START_SERVICE_DISCOVERY
,
103 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
104 MGMT_OP_READ_EXT_INDEX_LIST
,
105 MGMT_OP_READ_ADV_FEATURES
,
106 MGMT_OP_ADD_ADVERTISING
,
107 MGMT_OP_REMOVE_ADVERTISING
,
108 MGMT_OP_GET_ADV_SIZE_INFO
,
109 MGMT_OP_START_LIMITED_DISCOVERY
,
110 MGMT_OP_READ_EXT_INFO
,
111 MGMT_OP_SET_APPEARANCE
,
112 MGMT_OP_GET_PHY_CONFIGURATION
,
113 MGMT_OP_SET_PHY_CONFIGURATION
,
114 MGMT_OP_SET_BLOCKED_KEYS
,
115 MGMT_OP_SET_WIDEBAND_SPEECH
,
116 MGMT_OP_READ_CONTROLLER_CAP
,
117 MGMT_OP_READ_EXP_FEATURES_INFO
,
118 MGMT_OP_SET_EXP_FEATURE
,
119 MGMT_OP_READ_DEF_SYSTEM_CONFIG
,
120 MGMT_OP_SET_DEF_SYSTEM_CONFIG
,
121 MGMT_OP_READ_DEF_RUNTIME_CONFIG
,
122 MGMT_OP_SET_DEF_RUNTIME_CONFIG
,
123 MGMT_OP_GET_DEVICE_FLAGS
,
124 MGMT_OP_SET_DEVICE_FLAGS
,
125 MGMT_OP_READ_ADV_MONITOR_FEATURES
,
126 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
127 MGMT_OP_REMOVE_ADV_MONITOR
,
128 MGMT_OP_ADD_EXT_ADV_PARAMS
,
129 MGMT_OP_ADD_EXT_ADV_DATA
,
130 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI
,
131 MGMT_OP_SET_MESH_RECEIVER
,
132 MGMT_OP_MESH_READ_FEATURES
,
134 MGMT_OP_MESH_SEND_CANCEL
,
135 MGMT_OP_HCI_CMD_SYNC
,
138 static const u16 mgmt_events
[] = {
139 MGMT_EV_CONTROLLER_ERROR
,
141 MGMT_EV_INDEX_REMOVED
,
142 MGMT_EV_NEW_SETTINGS
,
143 MGMT_EV_CLASS_OF_DEV_CHANGED
,
144 MGMT_EV_LOCAL_NAME_CHANGED
,
145 MGMT_EV_NEW_LINK_KEY
,
146 MGMT_EV_NEW_LONG_TERM_KEY
,
147 MGMT_EV_DEVICE_CONNECTED
,
148 MGMT_EV_DEVICE_DISCONNECTED
,
149 MGMT_EV_CONNECT_FAILED
,
150 MGMT_EV_PIN_CODE_REQUEST
,
151 MGMT_EV_USER_CONFIRM_REQUEST
,
152 MGMT_EV_USER_PASSKEY_REQUEST
,
154 MGMT_EV_DEVICE_FOUND
,
156 MGMT_EV_DEVICE_BLOCKED
,
157 MGMT_EV_DEVICE_UNBLOCKED
,
158 MGMT_EV_DEVICE_UNPAIRED
,
159 MGMT_EV_PASSKEY_NOTIFY
,
162 MGMT_EV_DEVICE_ADDED
,
163 MGMT_EV_DEVICE_REMOVED
,
164 MGMT_EV_NEW_CONN_PARAM
,
165 MGMT_EV_UNCONF_INDEX_ADDED
,
166 MGMT_EV_UNCONF_INDEX_REMOVED
,
167 MGMT_EV_NEW_CONFIG_OPTIONS
,
168 MGMT_EV_EXT_INDEX_ADDED
,
169 MGMT_EV_EXT_INDEX_REMOVED
,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED
,
171 MGMT_EV_ADVERTISING_ADDED
,
172 MGMT_EV_ADVERTISING_REMOVED
,
173 MGMT_EV_EXT_INFO_CHANGED
,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED
,
175 MGMT_EV_EXP_FEATURE_CHANGED
,
176 MGMT_EV_DEVICE_FLAGS_CHANGED
,
177 MGMT_EV_ADV_MONITOR_ADDED
,
178 MGMT_EV_ADV_MONITOR_REMOVED
,
179 MGMT_EV_CONTROLLER_SUSPEND
,
180 MGMT_EV_CONTROLLER_RESUME
,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND
,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST
,
185 static const u16 mgmt_untrusted_commands
[] = {
186 MGMT_OP_READ_INDEX_LIST
,
188 MGMT_OP_READ_UNCONF_INDEX_LIST
,
189 MGMT_OP_READ_CONFIG_INFO
,
190 MGMT_OP_READ_EXT_INDEX_LIST
,
191 MGMT_OP_READ_EXT_INFO
,
192 MGMT_OP_READ_CONTROLLER_CAP
,
193 MGMT_OP_READ_EXP_FEATURES_INFO
,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG
,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG
,
198 static const u16 mgmt_untrusted_events
[] = {
200 MGMT_EV_INDEX_REMOVED
,
201 MGMT_EV_NEW_SETTINGS
,
202 MGMT_EV_CLASS_OF_DEV_CHANGED
,
203 MGMT_EV_LOCAL_NAME_CHANGED
,
204 MGMT_EV_UNCONF_INDEX_ADDED
,
205 MGMT_EV_UNCONF_INDEX_REMOVED
,
206 MGMT_EV_NEW_CONFIG_OPTIONS
,
207 MGMT_EV_EXT_INDEX_ADDED
,
208 MGMT_EV_EXT_INDEX_REMOVED
,
209 MGMT_EV_EXT_INFO_CHANGED
,
210 MGMT_EV_EXP_FEATURE_CHANGED
,
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table
[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
223 MGMT_STATUS_FAILED
, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
228 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY
, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED
, /* Rejected Security */
235 MGMT_STATUS_REJECTED
, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
243 MGMT_STATUS_BUSY
, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED
, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED
, /* Transaction Collision */
263 MGMT_STATUS_FAILED
, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED
, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED
, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED
, /* Reserved for future use */
270 MGMT_STATUS_BUSY
, /* Role Switch Pending */
271 MGMT_STATUS_FAILED
, /* Reserved for future use */
272 MGMT_STATUS_FAILED
, /* Slot Violation */
273 MGMT_STATUS_FAILED
, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY
, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
286 static u8
mgmt_errno_status(int err
)
290 return MGMT_STATUS_SUCCESS
;
292 return MGMT_STATUS_REJECTED
;
294 return MGMT_STATUS_INVALID_PARAMS
;
296 return MGMT_STATUS_NOT_SUPPORTED
;
298 return MGMT_STATUS_BUSY
;
300 return MGMT_STATUS_AUTH_FAILED
;
302 return MGMT_STATUS_NO_RESOURCES
;
304 return MGMT_STATUS_ALREADY_CONNECTED
;
306 return MGMT_STATUS_DISCONNECTED
;
309 return MGMT_STATUS_FAILED
;
312 static u8
mgmt_status(int err
)
315 return mgmt_errno_status(err
);
317 if (err
< ARRAY_SIZE(mgmt_status_table
))
318 return mgmt_status_table
[err
];
320 return MGMT_STATUS_FAILED
;
323 static int mgmt_index_event(u16 event
, struct hci_dev
*hdev
, void *data
,
326 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
330 static int mgmt_limited_event(u16 event
, struct hci_dev
*hdev
, void *data
,
331 u16 len
, int flag
, struct sock
*skip_sk
)
333 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
337 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 len
,
338 struct sock
*skip_sk
)
340 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
341 HCI_SOCK_TRUSTED
, skip_sk
);
344 static int mgmt_event_skb(struct sk_buff
*skb
, struct sock
*skip_sk
)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL
, skb
, HCI_SOCK_TRUSTED
,
350 static u8
le_addr_type(u8 mgmt_addr_type
)
352 if (mgmt_addr_type
== BDADDR_LE_PUBLIC
)
353 return ADDR_LE_DEV_PUBLIC
;
355 return ADDR_LE_DEV_RANDOM
;
358 void mgmt_fill_version_info(void *ver
)
360 struct mgmt_rp_read_version
*rp
= ver
;
362 rp
->version
= MGMT_VERSION
;
363 rp
->revision
= cpu_to_le16(MGMT_REVISION
);
366 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
369 struct mgmt_rp_read_version rp
;
371 bt_dev_dbg(hdev
, "sock %p", sk
);
373 mgmt_fill_version_info(&rp
);
375 return mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0,
379 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
382 struct mgmt_rp_read_commands
*rp
;
383 u16 num_commands
, num_events
;
387 bt_dev_dbg(hdev
, "sock %p", sk
);
389 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
390 num_commands
= ARRAY_SIZE(mgmt_commands
);
391 num_events
= ARRAY_SIZE(mgmt_events
);
393 num_commands
= ARRAY_SIZE(mgmt_untrusted_commands
);
394 num_events
= ARRAY_SIZE(mgmt_untrusted_events
);
397 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
399 rp
= kmalloc(rp_size
, GFP_KERNEL
);
403 rp
->num_commands
= cpu_to_le16(num_commands
);
404 rp
->num_events
= cpu_to_le16(num_events
);
406 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
407 __le16
*opcode
= rp
->opcodes
;
409 for (i
= 0; i
< num_commands
; i
++, opcode
++)
410 put_unaligned_le16(mgmt_commands
[i
], opcode
);
412 for (i
= 0; i
< num_events
; i
++, opcode
++)
413 put_unaligned_le16(mgmt_events
[i
], opcode
);
415 __le16
*opcode
= rp
->opcodes
;
417 for (i
= 0; i
< num_commands
; i
++, opcode
++)
418 put_unaligned_le16(mgmt_untrusted_commands
[i
], opcode
);
420 for (i
= 0; i
< num_events
; i
++, opcode
++)
421 put_unaligned_le16(mgmt_untrusted_events
[i
], opcode
);
424 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0,
431 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
434 struct mgmt_rp_read_index_list
*rp
;
440 bt_dev_dbg(hdev
, "sock %p", sk
);
442 read_lock(&hci_dev_list_lock
);
445 list_for_each_entry(d
, &hci_dev_list
, list
) {
446 if (!hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
450 rp_len
= sizeof(*rp
) + (2 * count
);
451 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
453 read_unlock(&hci_dev_list_lock
);
458 list_for_each_entry(d
, &hci_dev_list
, list
) {
459 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
460 hci_dev_test_flag(d
, HCI_CONFIG
) ||
461 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
464 /* Devices marked as raw-only are neither configured
465 * nor unconfigured controllers.
467 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
470 if (!hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
471 rp
->index
[count
++] = cpu_to_le16(d
->id
);
472 bt_dev_dbg(hdev
, "Added hci%u", d
->id
);
476 rp
->num_controllers
= cpu_to_le16(count
);
477 rp_len
= sizeof(*rp
) + (2 * count
);
479 read_unlock(&hci_dev_list_lock
);
481 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
,
489 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
490 void *data
, u16 data_len
)
492 struct mgmt_rp_read_unconf_index_list
*rp
;
498 bt_dev_dbg(hdev
, "sock %p", sk
);
500 read_lock(&hci_dev_list_lock
);
503 list_for_each_entry(d
, &hci_dev_list
, list
) {
504 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
508 rp_len
= sizeof(*rp
) + (2 * count
);
509 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
511 read_unlock(&hci_dev_list_lock
);
516 list_for_each_entry(d
, &hci_dev_list
, list
) {
517 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
518 hci_dev_test_flag(d
, HCI_CONFIG
) ||
519 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
522 /* Devices marked as raw-only are neither configured
523 * nor unconfigured controllers.
525 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
528 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
529 rp
->index
[count
++] = cpu_to_le16(d
->id
);
530 bt_dev_dbg(hdev
, "Added hci%u", d
->id
);
534 rp
->num_controllers
= cpu_to_le16(count
);
535 rp_len
= sizeof(*rp
) + (2 * count
);
537 read_unlock(&hci_dev_list_lock
);
539 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
540 MGMT_OP_READ_UNCONF_INDEX_LIST
, 0, rp
, rp_len
);
547 static int read_ext_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
548 void *data
, u16 data_len
)
550 struct mgmt_rp_read_ext_index_list
*rp
;
555 bt_dev_dbg(hdev
, "sock %p", sk
);
557 read_lock(&hci_dev_list_lock
);
560 list_for_each_entry(d
, &hci_dev_list
, list
)
563 rp
= kmalloc(struct_size(rp
, entry
, count
), GFP_ATOMIC
);
565 read_unlock(&hci_dev_list_lock
);
570 list_for_each_entry(d
, &hci_dev_list
, list
) {
571 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
572 hci_dev_test_flag(d
, HCI_CONFIG
) ||
573 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
576 /* Devices marked as raw-only are neither configured
577 * nor unconfigured controllers.
579 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
582 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
583 rp
->entry
[count
].type
= 0x01;
585 rp
->entry
[count
].type
= 0x00;
587 rp
->entry
[count
].bus
= d
->bus
;
588 rp
->entry
[count
++].index
= cpu_to_le16(d
->id
);
589 bt_dev_dbg(hdev
, "Added hci%u", d
->id
);
592 rp
->num_controllers
= cpu_to_le16(count
);
594 read_unlock(&hci_dev_list_lock
);
596 /* If this command is called at least once, then all the
597 * default index and unconfigured index events are disabled
598 * and from now on only extended index events are used.
600 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INDEX_EVENTS
);
601 hci_sock_clear_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
602 hci_sock_clear_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
604 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
605 MGMT_OP_READ_EXT_INDEX_LIST
, 0, rp
,
606 struct_size(rp
, entry
, count
));
613 static bool is_configured(struct hci_dev
*hdev
)
615 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
616 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
619 if ((test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) ||
620 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY
, &hdev
->quirks
)) &&
621 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
627 static __le32
get_missing_options(struct hci_dev
*hdev
)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
632 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
633 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY
, &hdev
->quirks
)) &&
637 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
638 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
640 return cpu_to_le32(options
);
643 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
645 __le32 options
= get_missing_options(hdev
);
647 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
648 sizeof(options
), HCI_MGMT_OPTION_EVENTS
, skip
);
651 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
653 __le32 options
= get_missing_options(hdev
);
655 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
659 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
660 void *data
, u16 data_len
)
662 struct mgmt_rp_read_config_info rp
;
665 bt_dev_dbg(hdev
, "sock %p", sk
);
669 memset(&rp
, 0, sizeof(rp
));
670 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
672 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
673 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
675 if (hdev
->set_bdaddr
)
676 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
678 rp
.supported_options
= cpu_to_le32(options
);
679 rp
.missing_options
= get_missing_options(hdev
);
681 hci_dev_unlock(hdev
);
683 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0,
687 static u32
get_supported_phys(struct hci_dev
*hdev
)
689 u32 supported_phys
= 0;
691 if (lmp_bredr_capable(hdev
)) {
692 supported_phys
|= MGMT_PHY_BR_1M_1SLOT
;
694 if (hdev
->features
[0][0] & LMP_3SLOT
)
695 supported_phys
|= MGMT_PHY_BR_1M_3SLOT
;
697 if (hdev
->features
[0][0] & LMP_5SLOT
)
698 supported_phys
|= MGMT_PHY_BR_1M_5SLOT
;
700 if (lmp_edr_2m_capable(hdev
)) {
701 supported_phys
|= MGMT_PHY_EDR_2M_1SLOT
;
703 if (lmp_edr_3slot_capable(hdev
))
704 supported_phys
|= MGMT_PHY_EDR_2M_3SLOT
;
706 if (lmp_edr_5slot_capable(hdev
))
707 supported_phys
|= MGMT_PHY_EDR_2M_5SLOT
;
709 if (lmp_edr_3m_capable(hdev
)) {
710 supported_phys
|= MGMT_PHY_EDR_3M_1SLOT
;
712 if (lmp_edr_3slot_capable(hdev
))
713 supported_phys
|= MGMT_PHY_EDR_3M_3SLOT
;
715 if (lmp_edr_5slot_capable(hdev
))
716 supported_phys
|= MGMT_PHY_EDR_3M_5SLOT
;
721 if (lmp_le_capable(hdev
)) {
722 supported_phys
|= MGMT_PHY_LE_1M_TX
;
723 supported_phys
|= MGMT_PHY_LE_1M_RX
;
725 if (hdev
->le_features
[1] & HCI_LE_PHY_2M
) {
726 supported_phys
|= MGMT_PHY_LE_2M_TX
;
727 supported_phys
|= MGMT_PHY_LE_2M_RX
;
730 if (hdev
->le_features
[1] & HCI_LE_PHY_CODED
) {
731 supported_phys
|= MGMT_PHY_LE_CODED_TX
;
732 supported_phys
|= MGMT_PHY_LE_CODED_RX
;
736 return supported_phys
;
739 static u32
get_selected_phys(struct hci_dev
*hdev
)
741 u32 selected_phys
= 0;
743 if (lmp_bredr_capable(hdev
)) {
744 selected_phys
|= MGMT_PHY_BR_1M_1SLOT
;
746 if (hdev
->pkt_type
& (HCI_DM3
| HCI_DH3
))
747 selected_phys
|= MGMT_PHY_BR_1M_3SLOT
;
749 if (hdev
->pkt_type
& (HCI_DM5
| HCI_DH5
))
750 selected_phys
|= MGMT_PHY_BR_1M_5SLOT
;
752 if (lmp_edr_2m_capable(hdev
)) {
753 if (!(hdev
->pkt_type
& HCI_2DH1
))
754 selected_phys
|= MGMT_PHY_EDR_2M_1SLOT
;
756 if (lmp_edr_3slot_capable(hdev
) &&
757 !(hdev
->pkt_type
& HCI_2DH3
))
758 selected_phys
|= MGMT_PHY_EDR_2M_3SLOT
;
760 if (lmp_edr_5slot_capable(hdev
) &&
761 !(hdev
->pkt_type
& HCI_2DH5
))
762 selected_phys
|= MGMT_PHY_EDR_2M_5SLOT
;
764 if (lmp_edr_3m_capable(hdev
)) {
765 if (!(hdev
->pkt_type
& HCI_3DH1
))
766 selected_phys
|= MGMT_PHY_EDR_3M_1SLOT
;
768 if (lmp_edr_3slot_capable(hdev
) &&
769 !(hdev
->pkt_type
& HCI_3DH3
))
770 selected_phys
|= MGMT_PHY_EDR_3M_3SLOT
;
772 if (lmp_edr_5slot_capable(hdev
) &&
773 !(hdev
->pkt_type
& HCI_3DH5
))
774 selected_phys
|= MGMT_PHY_EDR_3M_5SLOT
;
779 if (lmp_le_capable(hdev
)) {
780 if (hdev
->le_tx_def_phys
& HCI_LE_SET_PHY_1M
)
781 selected_phys
|= MGMT_PHY_LE_1M_TX
;
783 if (hdev
->le_rx_def_phys
& HCI_LE_SET_PHY_1M
)
784 selected_phys
|= MGMT_PHY_LE_1M_RX
;
786 if (hdev
->le_tx_def_phys
& HCI_LE_SET_PHY_2M
)
787 selected_phys
|= MGMT_PHY_LE_2M_TX
;
789 if (hdev
->le_rx_def_phys
& HCI_LE_SET_PHY_2M
)
790 selected_phys
|= MGMT_PHY_LE_2M_RX
;
792 if (hdev
->le_tx_def_phys
& HCI_LE_SET_PHY_CODED
)
793 selected_phys
|= MGMT_PHY_LE_CODED_TX
;
795 if (hdev
->le_rx_def_phys
& HCI_LE_SET_PHY_CODED
)
796 selected_phys
|= MGMT_PHY_LE_CODED_RX
;
799 return selected_phys
;
802 static u32
get_configurable_phys(struct hci_dev
*hdev
)
804 return (get_supported_phys(hdev
) & ~MGMT_PHY_BR_1M_1SLOT
&
805 ~MGMT_PHY_LE_1M_TX
& ~MGMT_PHY_LE_1M_RX
);
808 static u32
get_supported_settings(struct hci_dev
*hdev
)
812 settings
|= MGMT_SETTING_POWERED
;
813 settings
|= MGMT_SETTING_BONDABLE
;
814 settings
|= MGMT_SETTING_DEBUG_KEYS
;
815 settings
|= MGMT_SETTING_CONNECTABLE
;
816 settings
|= MGMT_SETTING_DISCOVERABLE
;
818 if (lmp_bredr_capable(hdev
)) {
819 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
820 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
821 settings
|= MGMT_SETTING_BREDR
;
822 settings
|= MGMT_SETTING_LINK_SECURITY
;
824 if (lmp_ssp_capable(hdev
)) {
825 settings
|= MGMT_SETTING_SSP
;
828 if (lmp_sc_capable(hdev
))
829 settings
|= MGMT_SETTING_SECURE_CONN
;
831 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED
,
833 settings
|= MGMT_SETTING_WIDEBAND_SPEECH
;
836 if (lmp_le_capable(hdev
)) {
837 settings
|= MGMT_SETTING_LE
;
838 settings
|= MGMT_SETTING_SECURE_CONN
;
839 settings
|= MGMT_SETTING_PRIVACY
;
840 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
841 settings
|= MGMT_SETTING_ADVERTISING
;
844 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
846 settings
|= MGMT_SETTING_CONFIGURATION
;
848 if (cis_central_capable(hdev
))
849 settings
|= MGMT_SETTING_CIS_CENTRAL
;
851 if (cis_peripheral_capable(hdev
))
852 settings
|= MGMT_SETTING_CIS_PERIPHERAL
;
854 settings
|= MGMT_SETTING_PHY_CONFIGURATION
;
859 static u32
get_current_settings(struct hci_dev
*hdev
)
863 if (hdev_is_powered(hdev
))
864 settings
|= MGMT_SETTING_POWERED
;
866 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
867 settings
|= MGMT_SETTING_CONNECTABLE
;
869 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
870 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
872 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
873 settings
|= MGMT_SETTING_DISCOVERABLE
;
875 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
))
876 settings
|= MGMT_SETTING_BONDABLE
;
878 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
879 settings
|= MGMT_SETTING_BREDR
;
881 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
882 settings
|= MGMT_SETTING_LE
;
884 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
))
885 settings
|= MGMT_SETTING_LINK_SECURITY
;
887 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
888 settings
|= MGMT_SETTING_SSP
;
890 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
891 settings
|= MGMT_SETTING_ADVERTISING
;
893 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))
894 settings
|= MGMT_SETTING_SECURE_CONN
;
896 if (hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
))
897 settings
|= MGMT_SETTING_DEBUG_KEYS
;
899 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
))
900 settings
|= MGMT_SETTING_PRIVACY
;
902 /* The current setting for static address has two purposes. The
903 * first is to indicate if the static address will be used and
904 * the second is to indicate if it is actually set.
906 * This means if the static address is not configured, this flag
907 * will never be set. If the address is configured, then if the
908 * address is actually used decides if the flag is set or not.
910 * For single mode LE only controllers and dual-mode controllers
911 * with BR/EDR disabled, the existence of the static address will
914 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
915 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
916 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
917 if (bacmp(&hdev
->static_addr
, BDADDR_ANY
))
918 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
921 if (hci_dev_test_flag(hdev
, HCI_WIDEBAND_SPEECH_ENABLED
))
922 settings
|= MGMT_SETTING_WIDEBAND_SPEECH
;
924 if (cis_central_capable(hdev
))
925 settings
|= MGMT_SETTING_CIS_CENTRAL
;
927 if (cis_peripheral_capable(hdev
))
928 settings
|= MGMT_SETTING_CIS_PERIPHERAL
;
930 if (bis_capable(hdev
))
931 settings
|= MGMT_SETTING_ISO_BROADCASTER
;
933 if (sync_recv_capable(hdev
))
934 settings
|= MGMT_SETTING_ISO_SYNC_RECEIVER
;
939 static struct mgmt_pending_cmd
*pending_find(u16 opcode
, struct hci_dev
*hdev
)
941 return mgmt_pending_find(HCI_CHANNEL_CONTROL
, opcode
, hdev
);
944 u8
mgmt_get_adv_discov_flags(struct hci_dev
*hdev
)
946 struct mgmt_pending_cmd
*cmd
;
948 /* If there's a pending mgmt command the flags will not yet have
949 * their final values, so check for this first.
951 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
953 struct mgmt_mode
*cp
= cmd
->param
;
955 return LE_AD_GENERAL
;
956 else if (cp
->val
== 0x02)
957 return LE_AD_LIMITED
;
959 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
960 return LE_AD_LIMITED
;
961 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
962 return LE_AD_GENERAL
;
968 bool mgmt_get_connectable(struct hci_dev
*hdev
)
970 struct mgmt_pending_cmd
*cmd
;
972 /* If there's a pending mgmt command the flag will not yet have
973 * it's final value, so check for this first.
975 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
977 struct mgmt_mode
*cp
= cmd
->param
;
982 return hci_dev_test_flag(hdev
, HCI_CONNECTABLE
);
/* Sync work: refresh EIR data and class of device after the service
 * cache timer fires.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
993 static void service_cache_off(struct work_struct
*work
)
995 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
998 if (!hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
1001 hci_cmd_sync_queue(hdev
, service_cache_sync
, NULL
, NULL
);
1004 static int rpa_expired_sync(struct hci_dev
*hdev
, void *data
)
1006 /* The generation of a new RPA and programming it into the
1007 * controller happens in the hci_req_enable_advertising()
1010 if (ext_adv_capable(hdev
))
1011 return hci_start_ext_adv_sync(hdev
, hdev
->cur_adv_instance
);
1013 return hci_enable_advertising_sync(hdev
);
1016 static void rpa_expired(struct work_struct
*work
)
1018 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1021 bt_dev_dbg(hdev
, "");
1023 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
1025 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
1028 hci_cmd_sync_queue(hdev
, rpa_expired_sync
, NULL
, NULL
);
1031 static int set_discoverable_sync(struct hci_dev
*hdev
, void *data
);
1033 static void discov_off(struct work_struct
*work
)
1035 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1038 bt_dev_dbg(hdev
, "");
1042 /* When discoverable timeout triggers, then just make sure
1043 * the limited discoverable flag is cleared. Even in the case
1044 * of a timeout triggered from general discoverable, it is
1045 * safe to unconditionally clear the flag.
1047 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1048 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1049 hdev
->discov_timeout
= 0;
1051 hci_cmd_sync_queue(hdev
, set_discoverable_sync
, NULL
, NULL
);
1053 mgmt_new_settings(hdev
);
1055 hci_dev_unlock(hdev
);
1058 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
);
1060 static void mesh_send_complete(struct hci_dev
*hdev
,
1061 struct mgmt_mesh_tx
*mesh_tx
, bool silent
)
1063 u8 handle
= mesh_tx
->handle
;
1066 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT
, hdev
, &handle
,
1067 sizeof(handle
), NULL
);
1069 mgmt_mesh_remove(mesh_tx
);
1072 static int mesh_send_done_sync(struct hci_dev
*hdev
, void *data
)
1074 struct mgmt_mesh_tx
*mesh_tx
;
1076 hci_dev_clear_flag(hdev
, HCI_MESH_SENDING
);
1077 hci_disable_advertising_sync(hdev
);
1078 mesh_tx
= mgmt_mesh_next(hdev
, NULL
);
1081 mesh_send_complete(hdev
, mesh_tx
, false);
1086 static int mesh_send_sync(struct hci_dev
*hdev
, void *data
);
1087 static void mesh_send_start_complete(struct hci_dev
*hdev
, void *data
, int err
);
1088 static void mesh_next(struct hci_dev
*hdev
, void *data
, int err
)
1090 struct mgmt_mesh_tx
*mesh_tx
= mgmt_mesh_next(hdev
, NULL
);
1095 err
= hci_cmd_sync_queue(hdev
, mesh_send_sync
, mesh_tx
,
1096 mesh_send_start_complete
);
1099 mesh_send_complete(hdev
, mesh_tx
, false);
1101 hci_dev_set_flag(hdev
, HCI_MESH_SENDING
);
1104 static void mesh_send_done(struct work_struct
*work
)
1106 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1107 mesh_send_done
.work
);
1109 if (!hci_dev_test_flag(hdev
, HCI_MESH_SENDING
))
1112 hci_cmd_sync_queue(hdev
, mesh_send_done_sync
, NULL
, mesh_next
);
1115 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
1117 if (hci_dev_test_flag(hdev
, HCI_MGMT
))
1120 BT_INFO("MGMT ver %d.%d", MGMT_VERSION
, MGMT_REVISION
);
1122 INIT_DELAYED_WORK(&hdev
->discov_off
, discov_off
);
1123 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
1124 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1125 INIT_DELAYED_WORK(&hdev
->mesh_send_done
, mesh_send_done
);
1127 /* Non-mgmt controlled devices get this bit set
1128 * implicitly so that pairing works for them, however
1129 * for mgmt we require user-space to explicitly enable
1132 hci_dev_clear_flag(hdev
, HCI_BONDABLE
);
1134 hci_dev_set_flag(hdev
, HCI_MGMT
);
1137 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1138 void *data
, u16 data_len
)
1140 struct mgmt_rp_read_info rp
;
1142 bt_dev_dbg(hdev
, "sock %p", sk
);
1146 memset(&rp
, 0, sizeof(rp
));
1148 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1150 rp
.version
= hdev
->hci_ver
;
1151 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1153 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1154 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1156 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1158 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1159 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1161 hci_dev_unlock(hdev
);
1163 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1167 static u16
append_eir_data_to_buf(struct hci_dev
*hdev
, u8
*eir
)
1172 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1173 eir_len
= eir_append_data(eir
, eir_len
, EIR_CLASS_OF_DEV
,
1174 hdev
->dev_class
, 3);
1176 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1177 eir_len
= eir_append_le16(eir
, eir_len
, EIR_APPEARANCE
,
1180 name_len
= strnlen(hdev
->dev_name
, sizeof(hdev
->dev_name
));
1181 eir_len
= eir_append_data(eir
, eir_len
, EIR_NAME_COMPLETE
,
1182 hdev
->dev_name
, name_len
);
1184 name_len
= strnlen(hdev
->short_name
, sizeof(hdev
->short_name
));
1185 eir_len
= eir_append_data(eir
, eir_len
, EIR_NAME_SHORT
,
1186 hdev
->short_name
, name_len
);
1191 static int read_ext_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1192 void *data
, u16 data_len
)
1195 struct mgmt_rp_read_ext_info
*rp
= (void *)buf
;
1198 bt_dev_dbg(hdev
, "sock %p", sk
);
1200 memset(&buf
, 0, sizeof(buf
));
1204 bacpy(&rp
->bdaddr
, &hdev
->bdaddr
);
1206 rp
->version
= hdev
->hci_ver
;
1207 rp
->manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1209 rp
->supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1210 rp
->current_settings
= cpu_to_le32(get_current_settings(hdev
));
1213 eir_len
= append_eir_data_to_buf(hdev
, rp
->eir
);
1214 rp
->eir_len
= cpu_to_le16(eir_len
);
1216 hci_dev_unlock(hdev
);
1218 /* If this command is called at least once, then the events
1219 * for class of device and local name changes are disabled
1220 * and only the new extended controller information event
1223 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INFO_EVENTS
);
1224 hci_sock_clear_flag(sk
, HCI_MGMT_DEV_CLASS_EVENTS
);
1225 hci_sock_clear_flag(sk
, HCI_MGMT_LOCAL_NAME_EVENTS
);
1227 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_EXT_INFO
, 0, rp
,
1228 sizeof(*rp
) + eir_len
);
1231 static int ext_info_changed(struct hci_dev
*hdev
, struct sock
*skip
)
1234 struct mgmt_ev_ext_info_changed
*ev
= (void *)buf
;
1237 memset(buf
, 0, sizeof(buf
));
1239 eir_len
= append_eir_data_to_buf(hdev
, ev
->eir
);
1240 ev
->eir_len
= cpu_to_le16(eir_len
);
1242 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED
, hdev
, ev
,
1243 sizeof(*ev
) + eir_len
,
1244 HCI_MGMT_EXT_INFO_EVENTS
, skip
);
1247 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1249 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1251 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1255 void mgmt_advertising_added(struct sock
*sk
, struct hci_dev
*hdev
, u8 instance
)
1257 struct mgmt_ev_advertising_added ev
;
1259 ev
.instance
= instance
;
1261 mgmt_event(MGMT_EV_ADVERTISING_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
1264 void mgmt_advertising_removed(struct sock
*sk
, struct hci_dev
*hdev
,
1267 struct mgmt_ev_advertising_removed ev
;
1269 ev
.instance
= instance
;
1271 mgmt_event(MGMT_EV_ADVERTISING_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
1274 static void cancel_adv_timeout(struct hci_dev
*hdev
)
1276 if (hdev
->adv_instance_timeout
) {
1277 hdev
->adv_instance_timeout
= 0;
1278 cancel_delayed_work(&hdev
->adv_instance_expire
);
1282 /* This function requires the caller holds hdev->lock */
1283 static void restart_le_actions(struct hci_dev
*hdev
)
1285 struct hci_conn_params
*p
;
1287 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
1288 /* Needed for AUTO_OFF case where might not "really"
1289 * have been powered off.
1291 hci_pend_le_list_del_init(p
);
1293 switch (p
->auto_connect
) {
1294 case HCI_AUTO_CONN_DIRECT
:
1295 case HCI_AUTO_CONN_ALWAYS
:
1296 hci_pend_le_list_add(p
, &hdev
->pend_le_conns
);
1298 case HCI_AUTO_CONN_REPORT
:
1299 hci_pend_le_list_add(p
, &hdev
->pend_le_reports
);
1307 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1309 __le32 ev
= cpu_to_le32(get_current_settings(hdev
));
1311 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
,
1312 sizeof(ev
), HCI_MGMT_SETTING_EVENTS
, skip
);
1315 static void mgmt_set_powered_complete(struct hci_dev
*hdev
, void *data
, int err
)
1317 struct mgmt_pending_cmd
*cmd
= data
;
1318 struct mgmt_mode
*cp
;
1320 /* Make sure cmd still outstanding. */
1321 if (err
== -ECANCELED
||
1322 cmd
!= pending_find(MGMT_OP_SET_POWERED
, hdev
))
1327 bt_dev_dbg(hdev
, "err %d", err
);
1332 restart_le_actions(hdev
);
1333 hci_update_passive_scan(hdev
);
1334 hci_dev_unlock(hdev
);
1337 send_settings_rsp(cmd
->sk
, cmd
->opcode
, hdev
);
1339 /* Only call new_setting for power on as power off is deferred
1340 * to hdev->power_off work which does call hci_dev_do_close.
1343 new_settings(hdev
, cmd
->sk
);
1345 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1349 mgmt_pending_remove(cmd
);
1352 static int set_powered_sync(struct hci_dev
*hdev
, void *data
)
1354 struct mgmt_pending_cmd
*cmd
= data
;
1355 struct mgmt_mode
*cp
;
1357 /* Make sure cmd still outstanding. */
1358 if (cmd
!= pending_find(MGMT_OP_SET_POWERED
, hdev
))
1363 BT_DBG("%s", hdev
->name
);
1365 return hci_set_powered_sync(hdev
, cp
->val
);
1368 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1371 struct mgmt_mode
*cp
= data
;
1372 struct mgmt_pending_cmd
*cmd
;
1375 bt_dev_dbg(hdev
, "sock %p", sk
);
1377 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1378 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1379 MGMT_STATUS_INVALID_PARAMS
);
1384 if (hci_dev_test_flag(hdev
, HCI_POWERING_DOWN
)) {
1385 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1391 if (pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1392 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1397 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1398 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1402 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1408 /* Cancel potentially blocking sync operation before power off */
1409 if (cp
->val
== 0x00) {
1410 hci_cmd_sync_cancel_sync(hdev
, -EHOSTDOWN
);
1411 err
= hci_cmd_sync_queue(hdev
, set_powered_sync
, cmd
,
1412 mgmt_set_powered_complete
);
1414 /* Use hci_cmd_sync_submit since hdev might not be running */
1415 err
= hci_cmd_sync_submit(hdev
, set_powered_sync
, cmd
,
1416 mgmt_set_powered_complete
);
1420 mgmt_pending_remove(cmd
);
1423 hci_dev_unlock(hdev
);
1427 int mgmt_new_settings(struct hci_dev
*hdev
)
1429 return new_settings(hdev
, NULL
);
1434 struct hci_dev
*hdev
;
1438 static void settings_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1440 struct cmd_lookup
*match
= data
;
1442 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1444 list_del(&cmd
->list
);
1446 if (match
->sk
== NULL
) {
1447 match
->sk
= cmd
->sk
;
1448 sock_hold(match
->sk
);
1451 mgmt_pending_free(cmd
);
1454 static void cmd_status_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1458 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1459 mgmt_pending_remove(cmd
);
1462 static void cmd_complete_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1464 struct cmd_lookup
*match
= data
;
1466 /* dequeue cmd_sync entries using cmd as data as that is about to be
1469 hci_cmd_sync_dequeue(match
->hdev
, NULL
, cmd
, NULL
);
1471 if (cmd
->cmd_complete
) {
1472 cmd
->cmd_complete(cmd
, match
->mgmt_status
);
1473 mgmt_pending_remove(cmd
);
1478 cmd_status_rsp(cmd
, data
);
1481 static int generic_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1483 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1484 cmd
->param
, cmd
->param_len
);
1487 static int addr_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1489 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1490 cmd
->param
, sizeof(struct mgmt_addr_info
));
1493 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1495 if (!lmp_bredr_capable(hdev
))
1496 return MGMT_STATUS_NOT_SUPPORTED
;
1497 else if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1498 return MGMT_STATUS_REJECTED
;
1500 return MGMT_STATUS_SUCCESS
;
1503 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1505 if (!lmp_le_capable(hdev
))
1506 return MGMT_STATUS_NOT_SUPPORTED
;
1507 else if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1508 return MGMT_STATUS_REJECTED
;
1510 return MGMT_STATUS_SUCCESS
;
1513 static void mgmt_set_discoverable_complete(struct hci_dev
*hdev
, void *data
,
1516 struct mgmt_pending_cmd
*cmd
= data
;
1518 bt_dev_dbg(hdev
, "err %d", err
);
1520 /* Make sure cmd still outstanding. */
1521 if (err
== -ECANCELED
||
1522 cmd
!= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
))
1528 u8 mgmt_err
= mgmt_status(err
);
1529 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1530 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1534 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1535 hdev
->discov_timeout
> 0) {
1536 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1537 queue_delayed_work(hdev
->req_workqueue
, &hdev
->discov_off
, to
);
1540 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1541 new_settings(hdev
, cmd
->sk
);
1544 mgmt_pending_remove(cmd
);
1545 hci_dev_unlock(hdev
);
1548 static int set_discoverable_sync(struct hci_dev
*hdev
, void *data
)
1550 BT_DBG("%s", hdev
->name
);
1552 return hci_update_discoverable_sync(hdev
);
1555 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1558 struct mgmt_cp_set_discoverable
*cp
= data
;
1559 struct mgmt_pending_cmd
*cmd
;
1563 bt_dev_dbg(hdev
, "sock %p", sk
);
1565 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1566 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1567 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1568 MGMT_STATUS_REJECTED
);
1570 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1571 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1572 MGMT_STATUS_INVALID_PARAMS
);
1574 timeout
= __le16_to_cpu(cp
->timeout
);
1576 /* Disabling discoverable requires that no timeout is set,
1577 * and enabling limited discoverable requires a timeout.
1579 if ((cp
->val
== 0x00 && timeout
> 0) ||
1580 (cp
->val
== 0x02 && timeout
== 0))
1581 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1582 MGMT_STATUS_INVALID_PARAMS
);
1586 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1587 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1588 MGMT_STATUS_NOT_POWERED
);
1592 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1593 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1594 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1599 if (!hci_dev_test_flag(hdev
, HCI_CONNECTABLE
)) {
1600 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1601 MGMT_STATUS_REJECTED
);
1605 if (hdev
->advertising_paused
) {
1606 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1611 if (!hdev_is_powered(hdev
)) {
1612 bool changed
= false;
1614 /* Setting limited discoverable when powered off is
1615 * not a valid operation since it requires a timeout
1616 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1618 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
)) {
1619 hci_dev_change_flag(hdev
, HCI_DISCOVERABLE
);
1623 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1628 err
= new_settings(hdev
, sk
);
1633 /* If the current mode is the same, then just update the timeout
1634 * value with the new value. And if only the timeout gets updated,
1635 * then no need for any HCI transactions.
1637 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1638 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
,
1639 HCI_LIMITED_DISCOVERABLE
)) {
1640 cancel_delayed_work(&hdev
->discov_off
);
1641 hdev
->discov_timeout
= timeout
;
1643 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1644 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1645 queue_delayed_work(hdev
->req_workqueue
,
1646 &hdev
->discov_off
, to
);
1649 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1653 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1659 /* Cancel any potential discoverable timeout that might be
1660 * still active and store new timeout value. The arming of
1661 * the timeout happens in the complete handler.
1663 cancel_delayed_work(&hdev
->discov_off
);
1664 hdev
->discov_timeout
= timeout
;
1667 hci_dev_set_flag(hdev
, HCI_DISCOVERABLE
);
1669 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1671 /* Limited discoverable mode */
1672 if (cp
->val
== 0x02)
1673 hci_dev_set_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1675 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1677 err
= hci_cmd_sync_queue(hdev
, set_discoverable_sync
, cmd
,
1678 mgmt_set_discoverable_complete
);
1681 mgmt_pending_remove(cmd
);
1684 hci_dev_unlock(hdev
);
1688 static void mgmt_set_connectable_complete(struct hci_dev
*hdev
, void *data
,
1691 struct mgmt_pending_cmd
*cmd
= data
;
1693 bt_dev_dbg(hdev
, "err %d", err
);
1695 /* Make sure cmd still outstanding. */
1696 if (err
== -ECANCELED
||
1697 cmd
!= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
))
1703 u8 mgmt_err
= mgmt_status(err
);
1704 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1708 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1709 new_settings(hdev
, cmd
->sk
);
1712 mgmt_pending_remove(cmd
);
1714 hci_dev_unlock(hdev
);
1717 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1718 struct sock
*sk
, u8 val
)
1720 bool changed
= false;
1723 if (!!val
!= hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
1727 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1729 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1730 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1733 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1738 hci_update_scan(hdev
);
1739 hci_update_passive_scan(hdev
);
1740 return new_settings(hdev
, sk
);
1746 static int set_connectable_sync(struct hci_dev
*hdev
, void *data
)
1748 BT_DBG("%s", hdev
->name
);
1750 return hci_update_connectable_sync(hdev
);
1753 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1756 struct mgmt_mode
*cp
= data
;
1757 struct mgmt_pending_cmd
*cmd
;
1760 bt_dev_dbg(hdev
, "sock %p", sk
);
1762 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1763 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1764 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1765 MGMT_STATUS_REJECTED
);
1767 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1768 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1769 MGMT_STATUS_INVALID_PARAMS
);
1773 if (!hdev_is_powered(hdev
)) {
1774 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1778 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1779 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1780 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1785 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1792 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1794 if (hdev
->discov_timeout
> 0)
1795 cancel_delayed_work(&hdev
->discov_off
);
1797 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1798 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1799 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1802 err
= hci_cmd_sync_queue(hdev
, set_connectable_sync
, cmd
,
1803 mgmt_set_connectable_complete
);
1806 mgmt_pending_remove(cmd
);
1809 hci_dev_unlock(hdev
);
1813 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1816 struct mgmt_mode
*cp
= data
;
1820 bt_dev_dbg(hdev
, "sock %p", sk
);
1822 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1823 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
1824 MGMT_STATUS_INVALID_PARAMS
);
1829 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_BONDABLE
);
1831 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_BONDABLE
);
1833 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
1838 /* In limited privacy mode the change of bondable mode
1839 * may affect the local advertising address.
1841 hci_update_discoverable(hdev
);
1843 err
= new_settings(hdev
, sk
);
1847 hci_dev_unlock(hdev
);
1851 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1854 struct mgmt_mode
*cp
= data
;
1855 struct mgmt_pending_cmd
*cmd
;
1859 bt_dev_dbg(hdev
, "sock %p", sk
);
1861 status
= mgmt_bredr_support(hdev
);
1863 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1866 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1867 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1868 MGMT_STATUS_INVALID_PARAMS
);
1872 if (!hdev_is_powered(hdev
)) {
1873 bool changed
= false;
1875 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
1876 hci_dev_change_flag(hdev
, HCI_LINK_SECURITY
);
1880 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1885 err
= new_settings(hdev
, sk
);
1890 if (pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1891 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1898 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1899 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1903 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1909 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1911 mgmt_pending_remove(cmd
);
1916 hci_dev_unlock(hdev
);
1920 static void set_ssp_complete(struct hci_dev
*hdev
, void *data
, int err
)
1922 struct cmd_lookup match
= { NULL
, hdev
};
1923 struct mgmt_pending_cmd
*cmd
= data
;
1924 struct mgmt_mode
*cp
= cmd
->param
;
1925 u8 enable
= cp
->val
;
1928 /* Make sure cmd still outstanding. */
1929 if (err
== -ECANCELED
|| cmd
!= pending_find(MGMT_OP_SET_SSP
, hdev
))
1933 u8 mgmt_err
= mgmt_status(err
);
1935 if (enable
&& hci_dev_test_and_clear_flag(hdev
,
1937 new_settings(hdev
, NULL
);
1940 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
1946 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
1948 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_SSP_ENABLED
);
1951 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
1954 new_settings(hdev
, match
.sk
);
1959 hci_update_eir_sync(hdev
);
1962 static int set_ssp_sync(struct hci_dev
*hdev
, void *data
)
1964 struct mgmt_pending_cmd
*cmd
= data
;
1965 struct mgmt_mode
*cp
= cmd
->param
;
1966 bool changed
= false;
1970 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
1972 err
= hci_write_ssp_mode_sync(hdev
, cp
->val
);
1974 if (!err
&& changed
)
1975 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
1980 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1982 struct mgmt_mode
*cp
= data
;
1983 struct mgmt_pending_cmd
*cmd
;
1987 bt_dev_dbg(hdev
, "sock %p", sk
);
1989 status
= mgmt_bredr_support(hdev
);
1991 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1993 if (!lmp_ssp_capable(hdev
))
1994 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1995 MGMT_STATUS_NOT_SUPPORTED
);
1997 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1998 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1999 MGMT_STATUS_INVALID_PARAMS
);
2003 if (!hdev_is_powered(hdev
)) {
2007 changed
= !hci_dev_test_and_set_flag(hdev
,
2010 changed
= hci_dev_test_and_clear_flag(hdev
,
2014 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2019 err
= new_settings(hdev
, sk
);
2024 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
2025 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2030 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
2031 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2035 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
2039 err
= hci_cmd_sync_queue(hdev
, set_ssp_sync
, cmd
,
2043 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2044 MGMT_STATUS_FAILED
);
2047 mgmt_pending_remove(cmd
);
2051 hci_dev_unlock(hdev
);
2055 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2057 bt_dev_dbg(hdev
, "sock %p", sk
);
2059 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2060 MGMT_STATUS_NOT_SUPPORTED
);
2063 static void set_le_complete(struct hci_dev
*hdev
, void *data
, int err
)
2065 struct cmd_lookup match
= { NULL
, hdev
};
2066 u8 status
= mgmt_status(err
);
2068 bt_dev_dbg(hdev
, "err %d", err
);
2071 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2076 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2078 new_settings(hdev
, match
.sk
);
2084 static int set_le_sync(struct hci_dev
*hdev
, void *data
)
2086 struct mgmt_pending_cmd
*cmd
= data
;
2087 struct mgmt_mode
*cp
= cmd
->param
;
2092 hci_clear_adv_instance_sync(hdev
, NULL
, 0x00, true);
2094 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
2095 hci_disable_advertising_sync(hdev
);
2097 if (ext_adv_capable(hdev
))
2098 hci_remove_ext_adv_instance_sync(hdev
, 0, cmd
->sk
);
2100 hci_dev_set_flag(hdev
, HCI_LE_ENABLED
);
2103 err
= hci_write_le_host_supported_sync(hdev
, val
, 0);
2105 /* Make sure the controller has a good default for
2106 * advertising data. Restrict the update to when LE
2107 * has actually been enabled. During power on, the
2108 * update in powered_update_hci will take care of it.
2110 if (!err
&& hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2111 if (ext_adv_capable(hdev
)) {
2114 status
= hci_setup_ext_adv_instance_sync(hdev
, 0x00);
2116 hci_update_scan_rsp_data_sync(hdev
, 0x00);
2118 hci_update_adv_data_sync(hdev
, 0x00);
2119 hci_update_scan_rsp_data_sync(hdev
, 0x00);
2122 hci_update_passive_scan(hdev
);
2128 static void set_mesh_complete(struct hci_dev
*hdev
, void *data
, int err
)
2130 struct mgmt_pending_cmd
*cmd
= data
;
2131 u8 status
= mgmt_status(err
);
2132 struct sock
*sk
= cmd
->sk
;
2135 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER
, hdev
,
2136 cmd_status_rsp
, &status
);
2140 mgmt_pending_remove(cmd
);
2141 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_MESH_RECEIVER
, 0, NULL
, 0);
2144 static int set_mesh_sync(struct hci_dev
*hdev
, void *data
)
2146 struct mgmt_pending_cmd
*cmd
= data
;
2147 struct mgmt_cp_set_mesh
*cp
= cmd
->param
;
2148 size_t len
= cmd
->param_len
;
2150 memset(hdev
->mesh_ad_types
, 0, sizeof(hdev
->mesh_ad_types
));
2153 hci_dev_set_flag(hdev
, HCI_MESH
);
2155 hci_dev_clear_flag(hdev
, HCI_MESH
);
2159 /* If filters don't fit, forward all adv pkts */
2160 if (len
<= sizeof(hdev
->mesh_ad_types
))
2161 memcpy(hdev
->mesh_ad_types
, cp
->ad_types
, len
);
2163 hci_update_passive_scan_sync(hdev
);
2167 static int set_mesh(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2169 struct mgmt_cp_set_mesh
*cp
= data
;
2170 struct mgmt_pending_cmd
*cmd
;
2173 bt_dev_dbg(hdev
, "sock %p", sk
);
2175 if (!lmp_le_capable(hdev
) ||
2176 !hci_dev_test_flag(hdev
, HCI_MESH_EXPERIMENTAL
))
2177 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_MESH_RECEIVER
,
2178 MGMT_STATUS_NOT_SUPPORTED
);
2180 if (cp
->enable
!= 0x00 && cp
->enable
!= 0x01)
2181 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_MESH_RECEIVER
,
2182 MGMT_STATUS_INVALID_PARAMS
);
2186 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_MESH_RECEIVER
, hdev
, data
, len
);
2190 err
= hci_cmd_sync_queue(hdev
, set_mesh_sync
, cmd
,
2194 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_MESH_RECEIVER
,
2195 MGMT_STATUS_FAILED
);
2198 mgmt_pending_remove(cmd
);
2201 hci_dev_unlock(hdev
);
2205 static void mesh_send_start_complete(struct hci_dev
*hdev
, void *data
, int err
)
2207 struct mgmt_mesh_tx
*mesh_tx
= data
;
2208 struct mgmt_cp_mesh_send
*send
= (void *)mesh_tx
->param
;
2209 unsigned long mesh_send_interval
;
2210 u8 mgmt_err
= mgmt_status(err
);
2212 /* Report any errors here, but don't report completion */
2215 hci_dev_clear_flag(hdev
, HCI_MESH_SENDING
);
2216 /* Send Complete Error Code for handle */
2217 mesh_send_complete(hdev
, mesh_tx
, false);
2221 mesh_send_interval
= msecs_to_jiffies((send
->cnt
) * 25);
2222 queue_delayed_work(hdev
->req_workqueue
, &hdev
->mesh_send_done
,
2223 mesh_send_interval
);
2226 static int mesh_send_sync(struct hci_dev
*hdev
, void *data
)
2228 struct mgmt_mesh_tx
*mesh_tx
= data
;
2229 struct mgmt_cp_mesh_send
*send
= (void *)mesh_tx
->param
;
2230 struct adv_info
*adv
, *next_instance
;
2231 u8 instance
= hdev
->le_num_of_adv_sets
+ 1;
2232 u16 timeout
, duration
;
2235 if (hdev
->le_num_of_adv_sets
<= hdev
->adv_instance_cnt
)
2236 return MGMT_STATUS_BUSY
;
2239 duration
= send
->cnt
* INTERVAL_TO_MS(hdev
->le_adv_max_interval
);
2240 adv
= hci_add_adv_instance(hdev
, instance
, 0,
2241 send
->adv_data_len
, send
->adv_data
,
2244 HCI_ADV_TX_POWER_NO_PREFERENCE
,
2245 hdev
->le_adv_min_interval
,
2246 hdev
->le_adv_max_interval
,
2250 mesh_tx
->instance
= instance
;
2254 if (hdev
->cur_adv_instance
== instance
) {
2255 /* If the currently advertised instance is being changed then
2256 * cancel the current advertising and schedule the next
2257 * instance. If there is only one instance then the overridden
2258 * advertising data will be visible right away.
2260 cancel_adv_timeout(hdev
);
2262 next_instance
= hci_get_next_instance(hdev
, instance
);
2264 instance
= next_instance
->instance
;
2267 } else if (hdev
->adv_instance_timeout
) {
2268 /* Immediately advertise the new instance if no other, or
2269 * let it go naturally from queue if ADV is already happening
2275 return hci_schedule_adv_instance_sync(hdev
, instance
, true);
2280 static void send_count(struct mgmt_mesh_tx
*mesh_tx
, void *data
)
2282 struct mgmt_rp_mesh_read_features
*rp
= data
;
2284 if (rp
->used_handles
>= rp
->max_handles
)
2287 rp
->handles
[rp
->used_handles
++] = mesh_tx
->handle
;
2290 static int mesh_features(struct sock
*sk
, struct hci_dev
*hdev
,
2291 void *data
, u16 len
)
2293 struct mgmt_rp_mesh_read_features rp
;
2295 if (!lmp_le_capable(hdev
) ||
2296 !hci_dev_test_flag(hdev
, HCI_MESH_EXPERIMENTAL
))
2297 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_READ_FEATURES
,
2298 MGMT_STATUS_NOT_SUPPORTED
);
2300 memset(&rp
, 0, sizeof(rp
));
2301 rp
.index
= cpu_to_le16(hdev
->id
);
2302 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
2303 rp
.max_handles
= MESH_HANDLES_MAX
;
2308 mgmt_mesh_foreach(hdev
, send_count
, &rp
, sk
);
2310 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_MESH_READ_FEATURES
, 0, &rp
,
2311 rp
.used_handles
+ sizeof(rp
) - MESH_HANDLES_MAX
);
2313 hci_dev_unlock(hdev
);
2317 static int send_cancel(struct hci_dev
*hdev
, void *data
)
2319 struct mgmt_pending_cmd
*cmd
= data
;
2320 struct mgmt_cp_mesh_send_cancel
*cancel
= (void *)cmd
->param
;
2321 struct mgmt_mesh_tx
*mesh_tx
;
2323 if (!cancel
->handle
) {
2325 mesh_tx
= mgmt_mesh_next(hdev
, cmd
->sk
);
2328 mesh_send_complete(hdev
, mesh_tx
, false);
2331 mesh_tx
= mgmt_mesh_find(hdev
, cancel
->handle
);
2333 if (mesh_tx
&& mesh_tx
->sk
== cmd
->sk
)
2334 mesh_send_complete(hdev
, mesh_tx
, false);
2337 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_MESH_SEND_CANCEL
,
2339 mgmt_pending_free(cmd
);
2344 static int mesh_send_cancel(struct sock
*sk
, struct hci_dev
*hdev
,
2345 void *data
, u16 len
)
2347 struct mgmt_pending_cmd
*cmd
;
2350 if (!lmp_le_capable(hdev
) ||
2351 !hci_dev_test_flag(hdev
, HCI_MESH_EXPERIMENTAL
))
2352 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND_CANCEL
,
2353 MGMT_STATUS_NOT_SUPPORTED
);
2355 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
2356 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND_CANCEL
,
2357 MGMT_STATUS_REJECTED
);
2360 cmd
= mgmt_pending_new(sk
, MGMT_OP_MESH_SEND_CANCEL
, hdev
, data
, len
);
2364 err
= hci_cmd_sync_queue(hdev
, send_cancel
, cmd
, NULL
);
2367 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND_CANCEL
,
2368 MGMT_STATUS_FAILED
);
2371 mgmt_pending_free(cmd
);
2374 hci_dev_unlock(hdev
);
2378 static int mesh_send(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2380 struct mgmt_mesh_tx
*mesh_tx
;
2381 struct mgmt_cp_mesh_send
*send
= data
;
2382 struct mgmt_rp_mesh_read_features rp
;
2386 if (!lmp_le_capable(hdev
) ||
2387 !hci_dev_test_flag(hdev
, HCI_MESH_EXPERIMENTAL
))
2388 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND
,
2389 MGMT_STATUS_NOT_SUPPORTED
);
2390 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) ||
2391 len
<= MGMT_MESH_SEND_SIZE
||
2392 len
> (MGMT_MESH_SEND_SIZE
+ 31))
2393 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND
,
2394 MGMT_STATUS_REJECTED
);
2398 memset(&rp
, 0, sizeof(rp
));
2399 rp
.max_handles
= MESH_HANDLES_MAX
;
2401 mgmt_mesh_foreach(hdev
, send_count
, &rp
, sk
);
2403 if (rp
.max_handles
<= rp
.used_handles
) {
2404 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND
,
2409 sending
= hci_dev_test_flag(hdev
, HCI_MESH_SENDING
);
2410 mesh_tx
= mgmt_mesh_add(sk
, hdev
, send
, len
);
2415 err
= hci_cmd_sync_queue(hdev
, mesh_send_sync
, mesh_tx
,
2416 mesh_send_start_complete
);
2419 bt_dev_err(hdev
, "Send Mesh Failed %d", err
);
2420 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_MESH_SEND
,
2421 MGMT_STATUS_FAILED
);
2425 mgmt_mesh_remove(mesh_tx
);
2428 hci_dev_set_flag(hdev
, HCI_MESH_SENDING
);
2430 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_MESH_SEND
, 0,
2431 &mesh_tx
->handle
, 1);
2435 hci_dev_unlock(hdev
);
2439 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2441 struct mgmt_mode
*cp
= data
;
2442 struct mgmt_pending_cmd
*cmd
;
2446 bt_dev_dbg(hdev
, "sock %p", sk
);
2448 if (!lmp_le_capable(hdev
))
2449 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2450 MGMT_STATUS_NOT_SUPPORTED
);
2452 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2453 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2454 MGMT_STATUS_INVALID_PARAMS
);
2456 /* Bluetooth single mode LE only controllers or dual-mode
2457 * controllers configured as LE only devices, do not allow
2458 * switching LE off. These have either LE enabled explicitly
2459 * or BR/EDR has been previously switched off.
2461 * When trying to enable an already enabled LE, then gracefully
2462 * send a positive response. Trying to disable it however will
2463 * result into rejection.
2465 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
2466 if (cp
->val
== 0x01)
2467 return send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2469 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2470 MGMT_STATUS_REJECTED
);
2476 enabled
= lmp_host_le_capable(hdev
);
2478 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2479 bool changed
= false;
2481 if (val
!= hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2482 hci_dev_change_flag(hdev
, HCI_LE_ENABLED
);
2486 if (!val
&& hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
2487 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
2491 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2496 err
= new_settings(hdev
, sk
);
2501 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
2502 pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2503 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2508 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2512 err
= hci_cmd_sync_queue(hdev
, set_le_sync
, cmd
,
2516 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2517 MGMT_STATUS_FAILED
);
2520 mgmt_pending_remove(cmd
);
2524 hci_dev_unlock(hdev
);
2528 static int send_hci_cmd_sync(struct hci_dev
*hdev
, void *data
)
2530 struct mgmt_pending_cmd
*cmd
= data
;
2531 struct mgmt_cp_hci_cmd_sync
*cp
= cmd
->param
;
2532 struct sk_buff
*skb
;
2534 skb
= __hci_cmd_sync_ev(hdev
, le16_to_cpu(cp
->opcode
),
2535 le16_to_cpu(cp
->params_len
), cp
->params
,
2536 cp
->event
, cp
->timeout
?
2537 msecs_to_jiffies(cp
->timeout
* 1000) :
2540 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_HCI_CMD_SYNC
,
2541 mgmt_status(PTR_ERR(skb
)));
2545 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_HCI_CMD_SYNC
, 0,
2546 skb
->data
, skb
->len
);
2551 mgmt_pending_free(cmd
);
2556 static int mgmt_hci_cmd_sync(struct sock
*sk
, struct hci_dev
*hdev
,
2557 void *data
, u16 len
)
2559 struct mgmt_cp_hci_cmd_sync
*cp
= data
;
2560 struct mgmt_pending_cmd
*cmd
;
2563 if (len
< sizeof(*cp
))
2564 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_HCI_CMD_SYNC
,
2565 MGMT_STATUS_INVALID_PARAMS
);
2568 cmd
= mgmt_pending_new(sk
, MGMT_OP_HCI_CMD_SYNC
, hdev
, data
, len
);
2572 err
= hci_cmd_sync_queue(hdev
, send_hci_cmd_sync
, cmd
, NULL
);
2575 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_HCI_CMD_SYNC
,
2576 MGMT_STATUS_FAILED
);
2579 mgmt_pending_free(cmd
);
2582 hci_dev_unlock(hdev
);
2586 /* This is a helper function to test for pending mgmt commands that can
2587 * cause CoD or EIR HCI commands. We can only allow one such pending
2588 * mgmt command at a time since otherwise we cannot easily track what
2589 * the current values are, will be, and based on that calculate if a new
2590 * HCI command needs to be sent and if yes with what value.
2592 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2594 struct mgmt_pending_cmd
*cmd
;
2596 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2597 switch (cmd
->opcode
) {
2598 case MGMT_OP_ADD_UUID
:
2599 case MGMT_OP_REMOVE_UUID
:
2600 case MGMT_OP_SET_DEV_CLASS
:
2601 case MGMT_OP_SET_POWERED
:
2609 static const u8 bluetooth_base_uuid
[] = {
2610 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2611 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2614 static u8
get_uuid_size(const u8
*uuid
)
2618 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2621 val
= get_unaligned_le32(&uuid
[12]);
2628 static void mgmt_class_complete(struct hci_dev
*hdev
, void *data
, int err
)
2630 struct mgmt_pending_cmd
*cmd
= data
;
2632 bt_dev_dbg(hdev
, "err %d", err
);
2634 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
2635 mgmt_status(err
), hdev
->dev_class
, 3);
2637 mgmt_pending_free(cmd
);
/* Sync worker for MGMT_OP_ADD_UUID: refresh Class of Device first, then the
 * EIR data which embeds the service UUID list.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2651 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2653 struct mgmt_cp_add_uuid
*cp
= data
;
2654 struct mgmt_pending_cmd
*cmd
;
2655 struct bt_uuid
*uuid
;
2658 bt_dev_dbg(hdev
, "sock %p", sk
);
2662 if (pending_eir_or_class(hdev
)) {
2663 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2668 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2674 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2675 uuid
->svc_hint
= cp
->svc_hint
;
2676 uuid
->size
= get_uuid_size(cp
->uuid
);
2678 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2680 cmd
= mgmt_pending_new(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2686 /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
2687 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2689 err
= hci_cmd_sync_submit(hdev
, add_uuid_sync
, cmd
,
2690 mgmt_class_complete
);
2692 mgmt_pending_free(cmd
);
2697 hci_dev_unlock(hdev
);
2701 static bool enable_service_cache(struct hci_dev
*hdev
)
2703 if (!hdev_is_powered(hdev
))
2706 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
2707 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
/* Sync worker for MGMT_OP_REMOVE_UUID: same order as add_uuid_sync() —
 * Class of Device first, then the EIR data carrying the UUID list.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2726 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2729 struct mgmt_cp_remove_uuid
*cp
= data
;
2730 struct mgmt_pending_cmd
*cmd
;
2731 struct bt_uuid
*match
, *tmp
;
2732 static const u8 bt_uuid_any
[] = {
2733 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2737 bt_dev_dbg(hdev
, "sock %p", sk
);
2741 if (pending_eir_or_class(hdev
)) {
2742 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2747 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2748 hci_uuids_clear(hdev
);
2750 if (enable_service_cache(hdev
)) {
2751 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2752 MGMT_OP_REMOVE_UUID
,
2753 0, hdev
->dev_class
, 3);
2762 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2763 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2766 list_del(&match
->list
);
2772 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2773 MGMT_STATUS_INVALID_PARAMS
);
2778 cmd
= mgmt_pending_new(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2784 /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
2785 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2787 err
= hci_cmd_sync_submit(hdev
, remove_uuid_sync
, cmd
,
2788 mgmt_class_complete
);
2790 mgmt_pending_free(cmd
);
2793 hci_dev_unlock(hdev
);
2797 static int set_class_sync(struct hci_dev
*hdev
, void *data
)
2801 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2802 cancel_delayed_work_sync(&hdev
->service_cache
);
2803 err
= hci_update_eir_sync(hdev
);
2809 return hci_update_class_sync(hdev
);
2812 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2815 struct mgmt_cp_set_dev_class
*cp
= data
;
2816 struct mgmt_pending_cmd
*cmd
;
2819 bt_dev_dbg(hdev
, "sock %p", sk
);
2821 if (!lmp_bredr_capable(hdev
))
2822 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2823 MGMT_STATUS_NOT_SUPPORTED
);
2827 if (pending_eir_or_class(hdev
)) {
2828 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2833 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2834 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2835 MGMT_STATUS_INVALID_PARAMS
);
2839 hdev
->major_class
= cp
->major
;
2840 hdev
->minor_class
= cp
->minor
;
2842 if (!hdev_is_powered(hdev
)) {
2843 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2844 hdev
->dev_class
, 3);
2848 cmd
= mgmt_pending_new(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2854 /* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
2855 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2857 err
= hci_cmd_sync_submit(hdev
, set_class_sync
, cmd
,
2858 mgmt_class_complete
);
2860 mgmt_pending_free(cmd
);
2863 hci_dev_unlock(hdev
);
2867 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2870 struct mgmt_cp_load_link_keys
*cp
= data
;
2871 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2872 sizeof(struct mgmt_link_key_info
));
2873 u16 key_count
, expected_len
;
2877 bt_dev_dbg(hdev
, "sock %p", sk
);
2879 if (!lmp_bredr_capable(hdev
))
2880 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2881 MGMT_STATUS_NOT_SUPPORTED
);
2883 key_count
= __le16_to_cpu(cp
->key_count
);
2884 if (key_count
> max_key_count
) {
2885 bt_dev_err(hdev
, "load_link_keys: too big key_count value %u",
2887 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2888 MGMT_STATUS_INVALID_PARAMS
);
2891 expected_len
= struct_size(cp
, keys
, key_count
);
2892 if (expected_len
!= len
) {
2893 bt_dev_err(hdev
, "load_link_keys: expected %u bytes, got %u bytes",
2895 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2896 MGMT_STATUS_INVALID_PARAMS
);
2899 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2900 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2901 MGMT_STATUS_INVALID_PARAMS
);
2903 bt_dev_dbg(hdev
, "debug_keys %u key_count %u", cp
->debug_keys
,
2908 hci_link_keys_clear(hdev
);
2911 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
2913 changed
= hci_dev_test_and_clear_flag(hdev
,
2914 HCI_KEEP_DEBUG_KEYS
);
2917 new_settings(hdev
, NULL
);
2919 for (i
= 0; i
< key_count
; i
++) {
2920 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2922 if (hci_is_blocked_key(hdev
,
2923 HCI_BLOCKED_KEY_TYPE_LINKKEY
,
2925 bt_dev_warn(hdev
, "Skipping blocked link key for %pMR",
2930 if (key
->addr
.type
!= BDADDR_BREDR
) {
2932 "Invalid link address type %u for %pMR",
2933 key
->addr
.type
, &key
->addr
.bdaddr
);
2937 if (key
->type
> 0x08) {
2938 bt_dev_warn(hdev
, "Invalid link key type %u for %pMR",
2939 key
->type
, &key
->addr
.bdaddr
);
2943 /* Always ignore debug keys and require a new pairing if
2944 * the user wants to use them.
2946 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2949 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2950 key
->type
, key
->pin_len
, NULL
);
2953 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2955 hci_dev_unlock(hdev
);
2960 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2961 u8 addr_type
, struct sock
*skip_sk
)
2963 struct mgmt_ev_device_unpaired ev
;
2965 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2966 ev
.addr
.type
= addr_type
;
2968 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2972 static void unpair_device_complete(struct hci_dev
*hdev
, void *data
, int err
)
2974 struct mgmt_pending_cmd
*cmd
= data
;
2975 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
2978 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
2980 cmd
->cmd_complete(cmd
, err
);
2981 mgmt_pending_free(cmd
);
2984 static int unpair_device_sync(struct hci_dev
*hdev
, void *data
)
2986 struct mgmt_pending_cmd
*cmd
= data
;
2987 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
2988 struct hci_conn
*conn
;
2990 if (cp
->addr
.type
== BDADDR_BREDR
)
2991 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2994 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
2995 le_addr_type(cp
->addr
.type
));
3000 /* Disregard any possible error since the likes of hci_abort_conn_sync
3001 * will clean up the connection no matter the error.
3003 hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3008 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3011 struct mgmt_cp_unpair_device
*cp
= data
;
3012 struct mgmt_rp_unpair_device rp
;
3013 struct hci_conn_params
*params
;
3014 struct mgmt_pending_cmd
*cmd
;
3015 struct hci_conn
*conn
;
3019 memset(&rp
, 0, sizeof(rp
));
3020 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3021 rp
.addr
.type
= cp
->addr
.type
;
3023 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3024 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3025 MGMT_STATUS_INVALID_PARAMS
,
3028 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
3029 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3030 MGMT_STATUS_INVALID_PARAMS
,
3035 if (!hdev_is_powered(hdev
)) {
3036 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3037 MGMT_STATUS_NOT_POWERED
, &rp
,
3042 if (cp
->addr
.type
== BDADDR_BREDR
) {
3043 /* If disconnection is requested, then look up the
3044 * connection. If the remote device is connected, it
3045 * will be later used to terminate the link.
3047 * Setting it to NULL explicitly will cause no
3048 * termination of the link.
3051 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3056 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
3058 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3059 MGMT_OP_UNPAIR_DEVICE
,
3060 MGMT_STATUS_NOT_PAIRED
, &rp
,
3068 /* LE address type */
3069 addr_type
= le_addr_type(cp
->addr
.type
);
3071 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3072 err
= smp_cancel_and_remove_pairing(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3074 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3075 MGMT_STATUS_NOT_PAIRED
, &rp
,
3080 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3082 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3087 /* Defer clearing up the connection parameters until closing to
3088 * give a chance of keeping them if a repairing happens.
3090 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3092 /* Disable auto-connection parameters if present */
3093 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3095 if (params
->explicit_connect
)
3096 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
3098 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3101 /* If disconnection is not requested, then clear the connection
3102 * variable so that the link is not terminated.
3104 if (!cp
->disconnect
)
3108 /* If the connection variable is set, then termination of the
3109 * link is requested.
3112 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
3114 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
3118 cmd
= mgmt_pending_new(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
3125 cmd
->cmd_complete
= addr_cmd_complete
;
3127 err
= hci_cmd_sync_queue(hdev
, unpair_device_sync
, cmd
,
3128 unpair_device_complete
);
3130 mgmt_pending_free(cmd
);
3133 hci_dev_unlock(hdev
);
3137 static void disconnect_complete(struct hci_dev
*hdev
, void *data
, int err
)
3139 struct mgmt_pending_cmd
*cmd
= data
;
3141 cmd
->cmd_complete(cmd
, mgmt_status(err
));
3142 mgmt_pending_free(cmd
);
3145 static int disconnect_sync(struct hci_dev
*hdev
, void *data
)
3147 struct mgmt_pending_cmd
*cmd
= data
;
3148 struct mgmt_cp_disconnect
*cp
= cmd
->param
;
3149 struct hci_conn
*conn
;
3151 if (cp
->addr
.type
== BDADDR_BREDR
)
3152 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3155 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
3156 le_addr_type(cp
->addr
.type
));
3161 /* Disregard any possible error since the likes of hci_abort_conn_sync
3162 * will clean up the connection no matter the error.
3164 hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3169 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3172 struct mgmt_cp_disconnect
*cp
= data
;
3173 struct mgmt_rp_disconnect rp
;
3174 struct mgmt_pending_cmd
*cmd
;
3177 bt_dev_dbg(hdev
, "sock %p", sk
);
3179 memset(&rp
, 0, sizeof(rp
));
3180 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3181 rp
.addr
.type
= cp
->addr
.type
;
3183 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3184 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3185 MGMT_STATUS_INVALID_PARAMS
,
3190 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
3191 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3192 MGMT_STATUS_NOT_POWERED
, &rp
,
3197 cmd
= mgmt_pending_new(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
3203 cmd
->cmd_complete
= generic_cmd_complete
;
3205 err
= hci_cmd_sync_queue(hdev
, disconnect_sync
, cmd
,
3206 disconnect_complete
);
3208 mgmt_pending_free(cmd
);
3211 hci_dev_unlock(hdev
);
3215 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
3217 switch (link_type
) {
3220 switch (addr_type
) {
3221 case ADDR_LE_DEV_PUBLIC
:
3222 return BDADDR_LE_PUBLIC
;
3225 /* Fallback to LE Random address type */
3226 return BDADDR_LE_RANDOM
;
3230 /* Fallback to BR/EDR type */
3231 return BDADDR_BREDR
;
3235 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3238 struct mgmt_rp_get_connections
*rp
;
3243 bt_dev_dbg(hdev
, "sock %p", sk
);
3247 if (!hdev_is_powered(hdev
)) {
3248 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
3249 MGMT_STATUS_NOT_POWERED
);
3254 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3255 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3259 rp
= kmalloc(struct_size(rp
, addr
, i
), GFP_KERNEL
);
3266 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3267 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3269 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
3270 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
3271 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
3276 rp
->conn_count
= cpu_to_le16(i
);
3278 /* Recalculate length in case of filtered SCO connections, etc */
3279 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
3280 struct_size(rp
, addr
, i
));
3285 hci_dev_unlock(hdev
);
3289 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3290 struct mgmt_cp_pin_code_neg_reply
*cp
)
3292 struct mgmt_pending_cmd
*cmd
;
3295 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
3300 cmd
->cmd_complete
= addr_cmd_complete
;
3302 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
3303 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
3305 mgmt_pending_remove(cmd
);
3310 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3313 struct hci_conn
*conn
;
3314 struct mgmt_cp_pin_code_reply
*cp
= data
;
3315 struct hci_cp_pin_code_reply reply
;
3316 struct mgmt_pending_cmd
*cmd
;
3319 bt_dev_dbg(hdev
, "sock %p", sk
);
3323 if (!hdev_is_powered(hdev
)) {
3324 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3325 MGMT_STATUS_NOT_POWERED
);
3329 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
3331 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3332 MGMT_STATUS_NOT_CONNECTED
);
3336 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
3337 struct mgmt_cp_pin_code_neg_reply ncp
;
3339 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
3341 bt_dev_err(hdev
, "PIN code is not 16 bytes long");
3343 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
3345 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3346 MGMT_STATUS_INVALID_PARAMS
);
3351 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
3357 cmd
->cmd_complete
= addr_cmd_complete
;
3359 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
3360 reply
.pin_len
= cp
->pin_len
;
3361 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
3363 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
3365 mgmt_pending_remove(cmd
);
3368 hci_dev_unlock(hdev
);
3372 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3375 struct mgmt_cp_set_io_capability
*cp
= data
;
3377 bt_dev_dbg(hdev
, "sock %p", sk
);
3379 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
3380 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
3381 MGMT_STATUS_INVALID_PARAMS
);
3385 hdev
->io_capability
= cp
->io_capability
;
3387 bt_dev_dbg(hdev
, "IO capability set to 0x%02x", hdev
->io_capability
);
3389 hci_dev_unlock(hdev
);
3391 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
3395 static struct mgmt_pending_cmd
*find_pairing(struct hci_conn
*conn
)
3397 struct hci_dev
*hdev
= conn
->hdev
;
3398 struct mgmt_pending_cmd
*cmd
;
3400 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
3401 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
3404 if (cmd
->user_data
!= conn
)
3413 static int pairing_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
3415 struct mgmt_rp_pair_device rp
;
3416 struct hci_conn
*conn
= cmd
->user_data
;
3419 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
3420 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
3422 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
,
3423 status
, &rp
, sizeof(rp
));
3425 /* So we don't get further callbacks for this connection */
3426 conn
->connect_cfm_cb
= NULL
;
3427 conn
->security_cfm_cb
= NULL
;
3428 conn
->disconn_cfm_cb
= NULL
;
3430 hci_conn_drop(conn
);
3432 /* The device is paired so there is no need to remove
3433 * its connection parameters anymore.
3435 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3442 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
3444 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
3445 struct mgmt_pending_cmd
*cmd
;
3447 cmd
= find_pairing(conn
);
3449 cmd
->cmd_complete(cmd
, status
);
3450 mgmt_pending_remove(cmd
);
3454 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3456 struct mgmt_pending_cmd
*cmd
;
3458 BT_DBG("status %u", status
);
3460 cmd
= find_pairing(conn
);
3462 BT_DBG("Unable to find a pending command");
3466 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3467 mgmt_pending_remove(cmd
);
3470 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3472 struct mgmt_pending_cmd
*cmd
;
3474 BT_DBG("status %u", status
);
3479 cmd
= find_pairing(conn
);
3481 BT_DBG("Unable to find a pending command");
3485 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3486 mgmt_pending_remove(cmd
);
3489 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3492 struct mgmt_cp_pair_device
*cp
= data
;
3493 struct mgmt_rp_pair_device rp
;
3494 struct mgmt_pending_cmd
*cmd
;
3495 u8 sec_level
, auth_type
;
3496 struct hci_conn
*conn
;
3499 bt_dev_dbg(hdev
, "sock %p", sk
);
3501 memset(&rp
, 0, sizeof(rp
));
3502 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3503 rp
.addr
.type
= cp
->addr
.type
;
3505 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3506 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3507 MGMT_STATUS_INVALID_PARAMS
,
3510 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3511 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3512 MGMT_STATUS_INVALID_PARAMS
,
3517 if (!hdev_is_powered(hdev
)) {
3518 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3519 MGMT_STATUS_NOT_POWERED
, &rp
,
3524 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
3525 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3526 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
3531 sec_level
= BT_SECURITY_MEDIUM
;
3532 auth_type
= HCI_AT_DEDICATED_BONDING
;
3534 if (cp
->addr
.type
== BDADDR_BREDR
) {
3535 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3536 auth_type
, CONN_REASON_PAIR_DEVICE
,
3537 HCI_ACL_CONN_TIMEOUT
);
3539 u8 addr_type
= le_addr_type(cp
->addr
.type
);
3540 struct hci_conn_params
*p
;
3542 /* When pairing a new device, it is expected to remember
3543 * this device for future connections. Adding the connection
3544 * parameter information ahead of time allows tracking
3545 * of the peripheral preferred values and will speed up any
3546 * further connection establishment.
3548 * If connection parameters already exist, then they
3549 * will be kept and this function does nothing.
3551 p
= hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3557 if (p
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
)
3558 p
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3560 conn
= hci_connect_le_scan(hdev
, &cp
->addr
.bdaddr
, addr_type
,
3561 sec_level
, HCI_LE_CONN_TIMEOUT
,
3562 CONN_REASON_PAIR_DEVICE
);
3568 if (PTR_ERR(conn
) == -EBUSY
)
3569 status
= MGMT_STATUS_BUSY
;
3570 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
3571 status
= MGMT_STATUS_NOT_SUPPORTED
;
3572 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
3573 status
= MGMT_STATUS_REJECTED
;
3575 status
= MGMT_STATUS_CONNECT_FAILED
;
3577 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3578 status
, &rp
, sizeof(rp
));
3582 if (conn
->connect_cfm_cb
) {
3583 hci_conn_drop(conn
);
3584 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3585 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3589 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3592 hci_conn_drop(conn
);
3596 cmd
->cmd_complete
= pairing_complete
;
3598 /* For LE, just connecting isn't a proof that the pairing finished */
3599 if (cp
->addr
.type
== BDADDR_BREDR
) {
3600 conn
->connect_cfm_cb
= pairing_complete_cb
;
3601 conn
->security_cfm_cb
= pairing_complete_cb
;
3602 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3604 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3605 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3606 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3609 conn
->io_capability
= cp
->io_cap
;
3610 cmd
->user_data
= hci_conn_get(conn
);
3612 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3613 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
3614 cmd
->cmd_complete(cmd
, 0);
3615 mgmt_pending_remove(cmd
);
3621 hci_dev_unlock(hdev
);
3625 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3628 struct mgmt_addr_info
*addr
= data
;
3629 struct mgmt_pending_cmd
*cmd
;
3630 struct hci_conn
*conn
;
3633 bt_dev_dbg(hdev
, "sock %p", sk
);
3637 if (!hdev_is_powered(hdev
)) {
3638 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3639 MGMT_STATUS_NOT_POWERED
);
3643 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3645 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3646 MGMT_STATUS_INVALID_PARAMS
);
3650 conn
= cmd
->user_data
;
3652 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3653 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3654 MGMT_STATUS_INVALID_PARAMS
);
3658 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
3659 mgmt_pending_remove(cmd
);
3661 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3662 addr
, sizeof(*addr
));
3664 /* Since user doesn't want to proceed with the connection, abort any
3665 * ongoing pairing and then terminate the link if it was created
3666 * because of the pair device action.
3668 if (addr
->type
== BDADDR_BREDR
)
3669 hci_remove_link_key(hdev
, &addr
->bdaddr
);
3671 smp_cancel_and_remove_pairing(hdev
, &addr
->bdaddr
,
3672 le_addr_type(addr
->type
));
3674 if (conn
->conn_reason
== CONN_REASON_PAIR_DEVICE
)
3675 hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3678 hci_dev_unlock(hdev
);
3682 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3683 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3684 u16 hci_op
, __le32 passkey
)
3686 struct mgmt_pending_cmd
*cmd
;
3687 struct hci_conn
*conn
;
3692 if (!hdev_is_powered(hdev
)) {
3693 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3694 MGMT_STATUS_NOT_POWERED
, addr
,
3699 if (addr
->type
== BDADDR_BREDR
)
3700 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3702 conn
= hci_conn_hash_lookup_le(hdev
, &addr
->bdaddr
,
3703 le_addr_type(addr
->type
));
3706 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3707 MGMT_STATUS_NOT_CONNECTED
, addr
,
3712 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3713 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3715 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3716 MGMT_STATUS_SUCCESS
, addr
,
3719 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3720 MGMT_STATUS_FAILED
, addr
,
3726 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3732 cmd
->cmd_complete
= addr_cmd_complete
;
3734 /* Continue with pairing via HCI */
3735 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3736 struct hci_cp_user_passkey_reply cp
;
3738 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3739 cp
.passkey
= passkey
;
3740 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3742 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3746 mgmt_pending_remove(cmd
);
3749 hci_dev_unlock(hdev
);
3753 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3754 void *data
, u16 len
)
3756 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3758 bt_dev_dbg(hdev
, "sock %p", sk
);
3760 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3761 MGMT_OP_PIN_CODE_NEG_REPLY
,
3762 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3765 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3768 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3770 bt_dev_dbg(hdev
, "sock %p", sk
);
3772 if (len
!= sizeof(*cp
))
3773 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3774 MGMT_STATUS_INVALID_PARAMS
);
3776 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3777 MGMT_OP_USER_CONFIRM_REPLY
,
3778 HCI_OP_USER_CONFIRM_REPLY
, 0);
3781 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3782 void *data
, u16 len
)
3784 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3786 bt_dev_dbg(hdev
, "sock %p", sk
);
3788 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3789 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3790 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3793 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3796 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3798 bt_dev_dbg(hdev
, "sock %p", sk
);
3800 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3801 MGMT_OP_USER_PASSKEY_REPLY
,
3802 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3805 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3806 void *data
, u16 len
)
3808 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3810 bt_dev_dbg(hdev
, "sock %p", sk
);
3812 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3813 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3814 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3817 static int adv_expire_sync(struct hci_dev
*hdev
, u32 flags
)
3819 struct adv_info
*adv_instance
;
3821 adv_instance
= hci_find_adv_instance(hdev
, hdev
->cur_adv_instance
);
3825 /* stop if current instance doesn't need to be changed */
3826 if (!(adv_instance
->flags
& flags
))
3829 cancel_adv_timeout(hdev
);
3831 adv_instance
= hci_get_next_instance(hdev
, adv_instance
->instance
);
3835 hci_schedule_adv_instance_sync(hdev
, adv_instance
->instance
, true);
3840 static int name_changed_sync(struct hci_dev
*hdev
, void *data
)
3842 return adv_expire_sync(hdev
, MGMT_ADV_FLAG_LOCAL_NAME
);
3845 static void set_name_complete(struct hci_dev
*hdev
, void *data
, int err
)
3847 struct mgmt_pending_cmd
*cmd
= data
;
3848 struct mgmt_cp_set_local_name
*cp
= cmd
->param
;
3849 u8 status
= mgmt_status(err
);
3851 bt_dev_dbg(hdev
, "err %d", err
);
3853 if (err
== -ECANCELED
||
3854 cmd
!= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
))
3858 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3861 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3864 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3865 hci_cmd_sync_queue(hdev
, name_changed_sync
, NULL
, NULL
);
3868 mgmt_pending_remove(cmd
);
3871 static int set_name_sync(struct hci_dev
*hdev
, void *data
)
3873 if (lmp_bredr_capable(hdev
)) {
3874 hci_update_name_sync(hdev
);
3875 hci_update_eir_sync(hdev
);
3878 /* The name is stored in the scan response data and so
3879 * no need to update the advertising data here.
3881 if (lmp_le_capable(hdev
) && hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
3882 hci_update_scan_rsp_data_sync(hdev
, hdev
->cur_adv_instance
);
3887 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3890 struct mgmt_cp_set_local_name
*cp
= data
;
3891 struct mgmt_pending_cmd
*cmd
;
3894 bt_dev_dbg(hdev
, "sock %p", sk
);
3898 /* If the old values are the same as the new ones just return a
3899 * direct command complete event.
3901 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3902 !memcmp(hdev
->short_name
, cp
->short_name
,
3903 sizeof(hdev
->short_name
))) {
3904 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3909 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3911 if (!hdev_is_powered(hdev
)) {
3912 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3914 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3919 err
= mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
,
3920 len
, HCI_MGMT_LOCAL_NAME_EVENTS
, sk
);
3921 ext_info_changed(hdev
, sk
);
3926 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3930 err
= hci_cmd_sync_queue(hdev
, set_name_sync
, cmd
,
3934 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3935 MGMT_STATUS_FAILED
);
3938 mgmt_pending_remove(cmd
);
3943 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3946 hci_dev_unlock(hdev
);
3950 static int appearance_changed_sync(struct hci_dev
*hdev
, void *data
)
3952 return adv_expire_sync(hdev
, MGMT_ADV_FLAG_APPEARANCE
);
3955 static int set_appearance(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3958 struct mgmt_cp_set_appearance
*cp
= data
;
3962 bt_dev_dbg(hdev
, "sock %p", sk
);
3964 if (!lmp_le_capable(hdev
))
3965 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_APPEARANCE
,
3966 MGMT_STATUS_NOT_SUPPORTED
);
3968 appearance
= le16_to_cpu(cp
->appearance
);
3972 if (hdev
->appearance
!= appearance
) {
3973 hdev
->appearance
= appearance
;
3975 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3976 hci_cmd_sync_queue(hdev
, appearance_changed_sync
, NULL
,
3979 ext_info_changed(hdev
, sk
);
3982 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_APPEARANCE
, 0, NULL
,
3985 hci_dev_unlock(hdev
);
3990 static int get_phy_configuration(struct sock
*sk
, struct hci_dev
*hdev
,
3991 void *data
, u16 len
)
3993 struct mgmt_rp_get_phy_configuration rp
;
3995 bt_dev_dbg(hdev
, "sock %p", sk
);
3999 memset(&rp
, 0, sizeof(rp
));
4001 rp
.supported_phys
= cpu_to_le32(get_supported_phys(hdev
));
4002 rp
.selected_phys
= cpu_to_le32(get_selected_phys(hdev
));
4003 rp
.configurable_phys
= cpu_to_le32(get_configurable_phys(hdev
));
4005 hci_dev_unlock(hdev
);
4007 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_PHY_CONFIGURATION
, 0,
4011 int mgmt_phy_configuration_changed(struct hci_dev
*hdev
, struct sock
*skip
)
4013 struct mgmt_ev_phy_configuration_changed ev
;
4015 memset(&ev
, 0, sizeof(ev
));
4017 ev
.selected_phys
= cpu_to_le32(get_selected_phys(hdev
));
4019 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED
, hdev
, &ev
,
4023 static void set_default_phy_complete(struct hci_dev
*hdev
, void *data
, int err
)
4025 struct mgmt_pending_cmd
*cmd
= data
;
4026 struct sk_buff
*skb
= cmd
->skb
;
4027 u8 status
= mgmt_status(err
);
4029 if (err
== -ECANCELED
||
4030 cmd
!= pending_find(MGMT_OP_SET_PHY_CONFIGURATION
, hdev
))
4035 status
= MGMT_STATUS_FAILED
;
4036 else if (IS_ERR(skb
))
4037 status
= mgmt_status(PTR_ERR(skb
));
4039 status
= mgmt_status(skb
->data
[0]);
4042 bt_dev_dbg(hdev
, "status %d", status
);
4045 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
4046 MGMT_OP_SET_PHY_CONFIGURATION
, status
);
4048 mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
4049 MGMT_OP_SET_PHY_CONFIGURATION
, 0,
4052 mgmt_phy_configuration_changed(hdev
, cmd
->sk
);
4055 if (skb
&& !IS_ERR(skb
))
4058 mgmt_pending_remove(cmd
);
4061 static int set_default_phy_sync(struct hci_dev
*hdev
, void *data
)
4063 struct mgmt_pending_cmd
*cmd
= data
;
4064 struct mgmt_cp_set_phy_configuration
*cp
= cmd
->param
;
4065 struct hci_cp_le_set_default_phy cp_phy
;
4066 u32 selected_phys
= __le32_to_cpu(cp
->selected_phys
);
4068 memset(&cp_phy
, 0, sizeof(cp_phy
));
4070 if (!(selected_phys
& MGMT_PHY_LE_TX_MASK
))
4071 cp_phy
.all_phys
|= 0x01;
4073 if (!(selected_phys
& MGMT_PHY_LE_RX_MASK
))
4074 cp_phy
.all_phys
|= 0x02;
4076 if (selected_phys
& MGMT_PHY_LE_1M_TX
)
4077 cp_phy
.tx_phys
|= HCI_LE_SET_PHY_1M
;
4079 if (selected_phys
& MGMT_PHY_LE_2M_TX
)
4080 cp_phy
.tx_phys
|= HCI_LE_SET_PHY_2M
;
4082 if (selected_phys
& MGMT_PHY_LE_CODED_TX
)
4083 cp_phy
.tx_phys
|= HCI_LE_SET_PHY_CODED
;
4085 if (selected_phys
& MGMT_PHY_LE_1M_RX
)
4086 cp_phy
.rx_phys
|= HCI_LE_SET_PHY_1M
;
4088 if (selected_phys
& MGMT_PHY_LE_2M_RX
)
4089 cp_phy
.rx_phys
|= HCI_LE_SET_PHY_2M
;
4091 if (selected_phys
& MGMT_PHY_LE_CODED_RX
)
4092 cp_phy
.rx_phys
|= HCI_LE_SET_PHY_CODED
;
4094 cmd
->skb
= __hci_cmd_sync(hdev
, HCI_OP_LE_SET_DEFAULT_PHY
,
4095 sizeof(cp_phy
), &cp_phy
, HCI_CMD_TIMEOUT
);
4100 static int set_phy_configuration(struct sock
*sk
, struct hci_dev
*hdev
,
4101 void *data
, u16 len
)
4103 struct mgmt_cp_set_phy_configuration
*cp
= data
;
4104 struct mgmt_pending_cmd
*cmd
;
4105 u32 selected_phys
, configurable_phys
, supported_phys
, unconfigure_phys
;
4106 u16 pkt_type
= (HCI_DH1
| HCI_DM1
);
4107 bool changed
= false;
4110 bt_dev_dbg(hdev
, "sock %p", sk
);
4112 configurable_phys
= get_configurable_phys(hdev
);
4113 supported_phys
= get_supported_phys(hdev
);
4114 selected_phys
= __le32_to_cpu(cp
->selected_phys
);
4116 if (selected_phys
& ~supported_phys
)
4117 return mgmt_cmd_status(sk
, hdev
->id
,
4118 MGMT_OP_SET_PHY_CONFIGURATION
,
4119 MGMT_STATUS_INVALID_PARAMS
);
4121 unconfigure_phys
= supported_phys
& ~configurable_phys
;
4123 if ((selected_phys
& unconfigure_phys
) != unconfigure_phys
)
4124 return mgmt_cmd_status(sk
, hdev
->id
,
4125 MGMT_OP_SET_PHY_CONFIGURATION
,
4126 MGMT_STATUS_INVALID_PARAMS
);
4128 if (selected_phys
== get_selected_phys(hdev
))
4129 return mgmt_cmd_complete(sk
, hdev
->id
,
4130 MGMT_OP_SET_PHY_CONFIGURATION
,
4135 if (!hdev_is_powered(hdev
)) {
4136 err
= mgmt_cmd_status(sk
, hdev
->id
,
4137 MGMT_OP_SET_PHY_CONFIGURATION
,
4138 MGMT_STATUS_REJECTED
);
4142 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION
, hdev
)) {
4143 err
= mgmt_cmd_status(sk
, hdev
->id
,
4144 MGMT_OP_SET_PHY_CONFIGURATION
,
4149 if (selected_phys
& MGMT_PHY_BR_1M_3SLOT
)
4150 pkt_type
|= (HCI_DH3
| HCI_DM3
);
4152 pkt_type
&= ~(HCI_DH3
| HCI_DM3
);
4154 if (selected_phys
& MGMT_PHY_BR_1M_5SLOT
)
4155 pkt_type
|= (HCI_DH5
| HCI_DM5
);
4157 pkt_type
&= ~(HCI_DH5
| HCI_DM5
);
4159 if (selected_phys
& MGMT_PHY_EDR_2M_1SLOT
)
4160 pkt_type
&= ~HCI_2DH1
;
4162 pkt_type
|= HCI_2DH1
;
4164 if (selected_phys
& MGMT_PHY_EDR_2M_3SLOT
)
4165 pkt_type
&= ~HCI_2DH3
;
4167 pkt_type
|= HCI_2DH3
;
4169 if (selected_phys
& MGMT_PHY_EDR_2M_5SLOT
)
4170 pkt_type
&= ~HCI_2DH5
;
4172 pkt_type
|= HCI_2DH5
;
4174 if (selected_phys
& MGMT_PHY_EDR_3M_1SLOT
)
4175 pkt_type
&= ~HCI_3DH1
;
4177 pkt_type
|= HCI_3DH1
;
4179 if (selected_phys
& MGMT_PHY_EDR_3M_3SLOT
)
4180 pkt_type
&= ~HCI_3DH3
;
4182 pkt_type
|= HCI_3DH3
;
4184 if (selected_phys
& MGMT_PHY_EDR_3M_5SLOT
)
4185 pkt_type
&= ~HCI_3DH5
;
4187 pkt_type
|= HCI_3DH5
;
4189 if (pkt_type
!= hdev
->pkt_type
) {
4190 hdev
->pkt_type
= pkt_type
;
4194 if ((selected_phys
& MGMT_PHY_LE_MASK
) ==
4195 (get_selected_phys(hdev
) & MGMT_PHY_LE_MASK
)) {
4197 mgmt_phy_configuration_changed(hdev
, sk
);
4199 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4200 MGMT_OP_SET_PHY_CONFIGURATION
,
4206 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_PHY_CONFIGURATION
, hdev
, data
,
4211 err
= hci_cmd_sync_queue(hdev
, set_default_phy_sync
, cmd
,
4212 set_default_phy_complete
);
4215 err
= mgmt_cmd_status(sk
, hdev
->id
,
4216 MGMT_OP_SET_PHY_CONFIGURATION
,
4217 MGMT_STATUS_FAILED
);
4220 mgmt_pending_remove(cmd
);
4224 hci_dev_unlock(hdev
);
4229 static int set_blocked_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4232 int err
= MGMT_STATUS_SUCCESS
;
4233 struct mgmt_cp_set_blocked_keys
*keys
= data
;
4234 const u16 max_key_count
= ((U16_MAX
- sizeof(*keys
)) /
4235 sizeof(struct mgmt_blocked_key_info
));
4236 u16 key_count
, expected_len
;
4239 bt_dev_dbg(hdev
, "sock %p", sk
);
4241 key_count
= __le16_to_cpu(keys
->key_count
);
4242 if (key_count
> max_key_count
) {
4243 bt_dev_err(hdev
, "too big key_count value %u", key_count
);
4244 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BLOCKED_KEYS
,
4245 MGMT_STATUS_INVALID_PARAMS
);
4248 expected_len
= struct_size(keys
, keys
, key_count
);
4249 if (expected_len
!= len
) {
4250 bt_dev_err(hdev
, "expected %u bytes, got %u bytes",
4252 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BLOCKED_KEYS
,
4253 MGMT_STATUS_INVALID_PARAMS
);
4258 hci_blocked_keys_clear(hdev
);
4260 for (i
= 0; i
< key_count
; ++i
) {
4261 struct blocked_key
*b
= kzalloc(sizeof(*b
), GFP_KERNEL
);
4264 err
= MGMT_STATUS_NO_RESOURCES
;
4268 b
->type
= keys
->keys
[i
].type
;
4269 memcpy(b
->val
, keys
->keys
[i
].val
, sizeof(b
->val
));
4270 list_add_rcu(&b
->list
, &hdev
->blocked_keys
);
4272 hci_dev_unlock(hdev
);
4274 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_BLOCKED_KEYS
,
4278 static int set_wideband_speech(struct sock
*sk
, struct hci_dev
*hdev
,
4279 void *data
, u16 len
)
4281 struct mgmt_mode
*cp
= data
;
4283 bool changed
= false;
4285 bt_dev_dbg(hdev
, "sock %p", sk
);
4287 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED
, &hdev
->quirks
))
4288 return mgmt_cmd_status(sk
, hdev
->id
,
4289 MGMT_OP_SET_WIDEBAND_SPEECH
,
4290 MGMT_STATUS_NOT_SUPPORTED
);
4292 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4293 return mgmt_cmd_status(sk
, hdev
->id
,
4294 MGMT_OP_SET_WIDEBAND_SPEECH
,
4295 MGMT_STATUS_INVALID_PARAMS
);
4299 if (hdev_is_powered(hdev
) &&
4300 !!cp
->val
!= hci_dev_test_flag(hdev
,
4301 HCI_WIDEBAND_SPEECH_ENABLED
)) {
4302 err
= mgmt_cmd_status(sk
, hdev
->id
,
4303 MGMT_OP_SET_WIDEBAND_SPEECH
,
4304 MGMT_STATUS_REJECTED
);
4309 changed
= !hci_dev_test_and_set_flag(hdev
,
4310 HCI_WIDEBAND_SPEECH_ENABLED
);
4312 changed
= hci_dev_test_and_clear_flag(hdev
,
4313 HCI_WIDEBAND_SPEECH_ENABLED
);
4315 err
= send_settings_rsp(sk
, MGMT_OP_SET_WIDEBAND_SPEECH
, hdev
);
4320 err
= new_settings(hdev
, sk
);
4323 hci_dev_unlock(hdev
);
4327 static int read_controller_cap(struct sock
*sk
, struct hci_dev
*hdev
,
4328 void *data
, u16 data_len
)
4331 struct mgmt_rp_read_controller_cap
*rp
= (void *)buf
;
4334 u8 tx_power_range
[2];
4336 bt_dev_dbg(hdev
, "sock %p", sk
);
4338 memset(&buf
, 0, sizeof(buf
));
4342 /* When the Read Simple Pairing Options command is supported, then
4343 * the remote public key validation is supported.
4345 * Alternatively, when Microsoft extensions are available, they can
4346 * indicate support for public key validation as well.
4348 if ((hdev
->commands
[41] & 0x08) || msft_curve_validity(hdev
))
4349 flags
|= 0x01; /* Remote public key validation (BR/EDR) */
4351 flags
|= 0x02; /* Remote public key validation (LE) */
4353 /* When the Read Encryption Key Size command is supported, then the
4354 * encryption key size is enforced.
4356 if (hdev
->commands
[20] & 0x10)
4357 flags
|= 0x04; /* Encryption key size enforcement (BR/EDR) */
4359 flags
|= 0x08; /* Encryption key size enforcement (LE) */
4361 cap_len
= eir_append_data(rp
->cap
, cap_len
, MGMT_CAP_SEC_FLAGS
,
4364 /* When the Read Simple Pairing Options command is supported, then
4365 * also max encryption key size information is provided.
4367 if (hdev
->commands
[41] & 0x08)
4368 cap_len
= eir_append_le16(rp
->cap
, cap_len
,
4369 MGMT_CAP_MAX_ENC_KEY_SIZE
,
4370 hdev
->max_enc_key_size
);
4372 cap_len
= eir_append_le16(rp
->cap
, cap_len
,
4373 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE
,
4374 SMP_MAX_ENC_KEY_SIZE
);
4376 /* Append the min/max LE tx power parameters if we were able to fetch
4377 * it from the controller
4379 if (hdev
->commands
[38] & 0x80) {
4380 memcpy(&tx_power_range
[0], &hdev
->min_le_tx_power
, 1);
4381 memcpy(&tx_power_range
[1], &hdev
->max_le_tx_power
, 1);
4382 cap_len
= eir_append_data(rp
->cap
, cap_len
, MGMT_CAP_LE_TX_PWR
,
4386 rp
->cap_len
= cpu_to_le16(cap_len
);
4388 hci_dev_unlock(hdev
);
4390 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONTROLLER_CAP
, 0,
4391 rp
, sizeof(*rp
) + cap_len
);
4394 #ifdef CONFIG_BT_FEATURE_DEBUG
4395 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4396 static const u8 debug_uuid
[16] = {
4397 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4398 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4402 /* 330859bc-7506-492d-9370-9a6f0614037f */
4403 static const u8 quality_report_uuid
[16] = {
4404 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4405 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4408 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4409 static const u8 offload_codecs_uuid
[16] = {
4410 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4411 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4414 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4415 static const u8 le_simultaneous_roles_uuid
[16] = {
4416 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4417 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4420 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4421 static const u8 rpa_resolution_uuid
[16] = {
4422 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4423 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4426 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4427 static const u8 iso_socket_uuid
[16] = {
4428 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4429 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4432 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4433 static const u8 mgmt_mesh_uuid
[16] = {
4434 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4435 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4438 static int read_exp_features_info(struct sock
*sk
, struct hci_dev
*hdev
,
4439 void *data
, u16 data_len
)
4441 struct mgmt_rp_read_exp_features_info
*rp
;
4447 bt_dev_dbg(hdev
, "sock %p", sk
);
4449 /* Enough space for 7 features */
4450 len
= sizeof(*rp
) + (sizeof(rp
->features
[0]) * 7);
4451 rp
= kzalloc(len
, GFP_KERNEL
);
4455 #ifdef CONFIG_BT_FEATURE_DEBUG
4457 flags
= bt_dbg_get() ? BIT(0) : 0;
4459 memcpy(rp
->features
[idx
].uuid
, debug_uuid
, 16);
4460 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4465 if (hdev
&& hci_dev_le_state_simultaneous(hdev
)) {
4466 if (hci_dev_test_flag(hdev
, HCI_LE_SIMULTANEOUS_ROLES
))
4471 memcpy(rp
->features
[idx
].uuid
, le_simultaneous_roles_uuid
, 16);
4472 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4476 if (hdev
&& ll_privacy_capable(hdev
)) {
4477 if (hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
4478 flags
= BIT(0) | BIT(1);
4482 memcpy(rp
->features
[idx
].uuid
, rpa_resolution_uuid
, 16);
4483 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4487 if (hdev
&& (aosp_has_quality_report(hdev
) ||
4488 hdev
->set_quality_report
)) {
4489 if (hci_dev_test_flag(hdev
, HCI_QUALITY_REPORT
))
4494 memcpy(rp
->features
[idx
].uuid
, quality_report_uuid
, 16);
4495 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4499 if (hdev
&& hdev
->get_data_path_id
) {
4500 if (hci_dev_test_flag(hdev
, HCI_OFFLOAD_CODECS_ENABLED
))
4505 memcpy(rp
->features
[idx
].uuid
, offload_codecs_uuid
, 16);
4506 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4510 if (IS_ENABLED(CONFIG_BT_LE
)) {
4511 flags
= iso_enabled() ? BIT(0) : 0;
4512 memcpy(rp
->features
[idx
].uuid
, iso_socket_uuid
, 16);
4513 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4517 if (hdev
&& lmp_le_capable(hdev
)) {
4518 if (hci_dev_test_flag(hdev
, HCI_MESH_EXPERIMENTAL
))
4523 memcpy(rp
->features
[idx
].uuid
, mgmt_mesh_uuid
, 16);
4524 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
4528 rp
->feature_count
= cpu_to_le16(idx
);
4530 /* After reading the experimental features information, enable
4531 * the events to update client on any future change.
4533 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4535 status
= mgmt_cmd_complete(sk
, hdev
? hdev
->id
: MGMT_INDEX_NONE
,
4536 MGMT_OP_READ_EXP_FEATURES_INFO
,
4537 0, rp
, sizeof(*rp
) + (20 * idx
));
4543 static int exp_ll_privacy_feature_changed(bool enabled
, struct hci_dev
*hdev
,
4546 struct mgmt_ev_exp_feature_changed ev
;
4548 memset(&ev
, 0, sizeof(ev
));
4549 memcpy(ev
.uuid
, rpa_resolution_uuid
, 16);
4550 ev
.flags
= cpu_to_le32((enabled
? BIT(0) : 0) | BIT(1));
4552 // Do we need to be atomic with the conn_flags?
4553 if (enabled
&& privacy_mode_capable(hdev
))
4554 hdev
->conn_flags
|= HCI_CONN_FLAG_DEVICE_PRIVACY
;
4556 hdev
->conn_flags
&= ~HCI_CONN_FLAG_DEVICE_PRIVACY
;
4558 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED
, hdev
,
4560 HCI_MGMT_EXP_FEATURE_EVENTS
, skip
);
4564 static int exp_feature_changed(struct hci_dev
*hdev
, const u8
*uuid
,
4565 bool enabled
, struct sock
*skip
)
4567 struct mgmt_ev_exp_feature_changed ev
;
4569 memset(&ev
, 0, sizeof(ev
));
4570 memcpy(ev
.uuid
, uuid
, 16);
4571 ev
.flags
= cpu_to_le32(enabled
? BIT(0) : 0);
4573 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED
, hdev
,
4575 HCI_MGMT_EXP_FEATURE_EVENTS
, skip
);
4578 #define EXP_FEAT(_uuid, _set_func) \
4581 .set_func = _set_func, \
4584 /* The zero key uuid is special. Multiple exp features are set through it. */
4585 static int set_zero_key_func(struct sock
*sk
, struct hci_dev
*hdev
,
4586 struct mgmt_cp_set_exp_feature
*cp
, u16 data_len
)
4588 struct mgmt_rp_set_exp_feature rp
;
4590 memset(rp
.uuid
, 0, 16);
4591 rp
.flags
= cpu_to_le32(0);
4593 #ifdef CONFIG_BT_FEATURE_DEBUG
4595 bool changed
= bt_dbg_get();
4600 exp_feature_changed(NULL
, ZERO_KEY
, false, sk
);
4604 if (hdev
&& use_ll_privacy(hdev
) && !hdev_is_powered(hdev
)) {
4607 changed
= hci_dev_test_and_clear_flag(hdev
,
4608 HCI_ENABLE_LL_PRIVACY
);
4610 exp_feature_changed(hdev
, rpa_resolution_uuid
, false,
4614 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4616 return mgmt_cmd_complete(sk
, hdev
? hdev
->id
: MGMT_INDEX_NONE
,
4617 MGMT_OP_SET_EXP_FEATURE
, 0,
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global Bluetooth debug feature. Only valid on the
 * non-controller index.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4668 static int set_mgmt_mesh_func(struct sock
*sk
, struct hci_dev
*hdev
,
4669 struct mgmt_cp_set_exp_feature
*cp
, u16 data_len
)
4671 struct mgmt_rp_set_exp_feature rp
;
4675 /* Command requires to use the controller index */
4677 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4678 MGMT_OP_SET_EXP_FEATURE
,
4679 MGMT_STATUS_INVALID_INDEX
);
4681 /* Parameters are limited to a single octet */
4682 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
4683 return mgmt_cmd_status(sk
, hdev
->id
,
4684 MGMT_OP_SET_EXP_FEATURE
,
4685 MGMT_STATUS_INVALID_PARAMS
);
4687 /* Only boolean on/off is supported */
4688 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
4689 return mgmt_cmd_status(sk
, hdev
->id
,
4690 MGMT_OP_SET_EXP_FEATURE
,
4691 MGMT_STATUS_INVALID_PARAMS
);
4693 val
= !!cp
->param
[0];
4696 changed
= !hci_dev_test_and_set_flag(hdev
,
4697 HCI_MESH_EXPERIMENTAL
);
4699 hci_dev_clear_flag(hdev
, HCI_MESH
);
4700 changed
= hci_dev_test_and_clear_flag(hdev
,
4701 HCI_MESH_EXPERIMENTAL
);
4704 memcpy(rp
.uuid
, mgmt_mesh_uuid
, 16);
4705 rp
.flags
= cpu_to_le32(val
? BIT(0) : 0);
4707 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4709 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4710 MGMT_OP_SET_EXP_FEATURE
, 0,
4714 exp_feature_changed(hdev
, mgmt_mesh_uuid
, val
, sk
);
4719 static int set_rpa_resolution_func(struct sock
*sk
, struct hci_dev
*hdev
,
4720 struct mgmt_cp_set_exp_feature
*cp
,
4723 struct mgmt_rp_set_exp_feature rp
;
4728 /* Command requires to use the controller index */
4730 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4731 MGMT_OP_SET_EXP_FEATURE
,
4732 MGMT_STATUS_INVALID_INDEX
);
4734 /* Changes can only be made when controller is powered down */
4735 if (hdev_is_powered(hdev
))
4736 return mgmt_cmd_status(sk
, hdev
->id
,
4737 MGMT_OP_SET_EXP_FEATURE
,
4738 MGMT_STATUS_REJECTED
);
4740 /* Parameters are limited to a single octet */
4741 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
4742 return mgmt_cmd_status(sk
, hdev
->id
,
4743 MGMT_OP_SET_EXP_FEATURE
,
4744 MGMT_STATUS_INVALID_PARAMS
);
4746 /* Only boolean on/off is supported */
4747 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
4748 return mgmt_cmd_status(sk
, hdev
->id
,
4749 MGMT_OP_SET_EXP_FEATURE
,
4750 MGMT_STATUS_INVALID_PARAMS
);
4752 val
= !!cp
->param
[0];
4755 changed
= !hci_dev_test_and_set_flag(hdev
,
4756 HCI_ENABLE_LL_PRIVACY
);
4757 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
4759 /* Enable LL privacy + supported settings changed */
4760 flags
= BIT(0) | BIT(1);
4762 changed
= hci_dev_test_and_clear_flag(hdev
,
4763 HCI_ENABLE_LL_PRIVACY
);
4765 /* Disable LL privacy + supported settings changed */
4769 memcpy(rp
.uuid
, rpa_resolution_uuid
, 16);
4770 rp
.flags
= cpu_to_le32(flags
);
4772 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4774 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4775 MGMT_OP_SET_EXP_FEATURE
, 0,
4779 exp_ll_privacy_feature_changed(val
, hdev
, sk
);
4784 static int set_quality_report_func(struct sock
*sk
, struct hci_dev
*hdev
,
4785 struct mgmt_cp_set_exp_feature
*cp
,
4788 struct mgmt_rp_set_exp_feature rp
;
4792 /* Command requires to use a valid controller index */
4794 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4795 MGMT_OP_SET_EXP_FEATURE
,
4796 MGMT_STATUS_INVALID_INDEX
);
4798 /* Parameters are limited to a single octet */
4799 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
4800 return mgmt_cmd_status(sk
, hdev
->id
,
4801 MGMT_OP_SET_EXP_FEATURE
,
4802 MGMT_STATUS_INVALID_PARAMS
);
4804 /* Only boolean on/off is supported */
4805 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
4806 return mgmt_cmd_status(sk
, hdev
->id
,
4807 MGMT_OP_SET_EXP_FEATURE
,
4808 MGMT_STATUS_INVALID_PARAMS
);
4810 hci_req_sync_lock(hdev
);
4812 val
= !!cp
->param
[0];
4813 changed
= (val
!= hci_dev_test_flag(hdev
, HCI_QUALITY_REPORT
));
4815 if (!aosp_has_quality_report(hdev
) && !hdev
->set_quality_report
) {
4816 err
= mgmt_cmd_status(sk
, hdev
->id
,
4817 MGMT_OP_SET_EXP_FEATURE
,
4818 MGMT_STATUS_NOT_SUPPORTED
);
4819 goto unlock_quality_report
;
4823 if (hdev
->set_quality_report
)
4824 err
= hdev
->set_quality_report(hdev
, val
);
4826 err
= aosp_set_quality_report(hdev
, val
);
4829 err
= mgmt_cmd_status(sk
, hdev
->id
,
4830 MGMT_OP_SET_EXP_FEATURE
,
4831 MGMT_STATUS_FAILED
);
4832 goto unlock_quality_report
;
4836 hci_dev_set_flag(hdev
, HCI_QUALITY_REPORT
);
4838 hci_dev_clear_flag(hdev
, HCI_QUALITY_REPORT
);
4841 bt_dev_dbg(hdev
, "quality report enable %d changed %d", val
, changed
);
4843 memcpy(rp
.uuid
, quality_report_uuid
, 16);
4844 rp
.flags
= cpu_to_le32(val
? BIT(0) : 0);
4845 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4847 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_EXP_FEATURE
, 0,
4851 exp_feature_changed(hdev
, quality_report_uuid
, val
, sk
);
4853 unlock_quality_report
:
4854 hci_req_sync_unlock(hdev
);
4858 static int set_offload_codec_func(struct sock
*sk
, struct hci_dev
*hdev
,
4859 struct mgmt_cp_set_exp_feature
*cp
,
4864 struct mgmt_rp_set_exp_feature rp
;
4866 /* Command requires to use a valid controller index */
4868 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4869 MGMT_OP_SET_EXP_FEATURE
,
4870 MGMT_STATUS_INVALID_INDEX
);
4872 /* Parameters are limited to a single octet */
4873 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
4874 return mgmt_cmd_status(sk
, hdev
->id
,
4875 MGMT_OP_SET_EXP_FEATURE
,
4876 MGMT_STATUS_INVALID_PARAMS
);
4878 /* Only boolean on/off is supported */
4879 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
4880 return mgmt_cmd_status(sk
, hdev
->id
,
4881 MGMT_OP_SET_EXP_FEATURE
,
4882 MGMT_STATUS_INVALID_PARAMS
);
4884 val
= !!cp
->param
[0];
4885 changed
= (val
!= hci_dev_test_flag(hdev
, HCI_OFFLOAD_CODECS_ENABLED
));
4887 if (!hdev
->get_data_path_id
) {
4888 return mgmt_cmd_status(sk
, hdev
->id
,
4889 MGMT_OP_SET_EXP_FEATURE
,
4890 MGMT_STATUS_NOT_SUPPORTED
);
4895 hci_dev_set_flag(hdev
, HCI_OFFLOAD_CODECS_ENABLED
);
4897 hci_dev_clear_flag(hdev
, HCI_OFFLOAD_CODECS_ENABLED
);
4900 bt_dev_info(hdev
, "offload codecs enable %d changed %d",
4903 memcpy(rp
.uuid
, offload_codecs_uuid
, 16);
4904 rp
.flags
= cpu_to_le32(val
? BIT(0) : 0);
4905 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4906 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4907 MGMT_OP_SET_EXP_FEATURE
, 0,
4911 exp_feature_changed(hdev
, offload_codecs_uuid
, val
, sk
);
4916 static int set_le_simultaneous_roles_func(struct sock
*sk
, struct hci_dev
*hdev
,
4917 struct mgmt_cp_set_exp_feature
*cp
,
4922 struct mgmt_rp_set_exp_feature rp
;
4924 /* Command requires to use a valid controller index */
4926 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4927 MGMT_OP_SET_EXP_FEATURE
,
4928 MGMT_STATUS_INVALID_INDEX
);
4930 /* Parameters are limited to a single octet */
4931 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
4932 return mgmt_cmd_status(sk
, hdev
->id
,
4933 MGMT_OP_SET_EXP_FEATURE
,
4934 MGMT_STATUS_INVALID_PARAMS
);
4936 /* Only boolean on/off is supported */
4937 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
4938 return mgmt_cmd_status(sk
, hdev
->id
,
4939 MGMT_OP_SET_EXP_FEATURE
,
4940 MGMT_STATUS_INVALID_PARAMS
);
4942 val
= !!cp
->param
[0];
4943 changed
= (val
!= hci_dev_test_flag(hdev
, HCI_LE_SIMULTANEOUS_ROLES
));
4945 if (!hci_dev_le_state_simultaneous(hdev
)) {
4946 return mgmt_cmd_status(sk
, hdev
->id
,
4947 MGMT_OP_SET_EXP_FEATURE
,
4948 MGMT_STATUS_NOT_SUPPORTED
);
4953 hci_dev_set_flag(hdev
, HCI_LE_SIMULTANEOUS_ROLES
);
4955 hci_dev_clear_flag(hdev
, HCI_LE_SIMULTANEOUS_ROLES
);
4958 bt_dev_info(hdev
, "LE simultaneous roles enable %d changed %d",
4961 memcpy(rp
.uuid
, le_simultaneous_roles_uuid
, 16);
4962 rp
.flags
= cpu_to_le32(val
? BIT(0) : 0);
4963 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4964 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4965 MGMT_OP_SET_EXP_FEATURE
, 0,
4969 exp_feature_changed(hdev
, le_simultaneous_roles_uuid
, val
, sk
);
4975 static int set_iso_socket_func(struct sock
*sk
, struct hci_dev
*hdev
,
4976 struct mgmt_cp_set_exp_feature
*cp
, u16 data_len
)
4978 struct mgmt_rp_set_exp_feature rp
;
4979 bool val
, changed
= false;
4982 /* Command requires to use the non-controller index */
4984 return mgmt_cmd_status(sk
, hdev
->id
,
4985 MGMT_OP_SET_EXP_FEATURE
,
4986 MGMT_STATUS_INVALID_INDEX
);
4988 /* Parameters are limited to a single octet */
4989 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
4990 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4991 MGMT_OP_SET_EXP_FEATURE
,
4992 MGMT_STATUS_INVALID_PARAMS
);
4994 /* Only boolean on/off is supported */
4995 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
4996 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
4997 MGMT_OP_SET_EXP_FEATURE
,
4998 MGMT_STATUS_INVALID_PARAMS
);
5000 val
= cp
->param
[0] ? true : false;
5009 memcpy(rp
.uuid
, iso_socket_uuid
, 16);
5010 rp
.flags
= cpu_to_le32(val
? BIT(0) : 0);
5012 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
5014 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
5015 MGMT_OP_SET_EXP_FEATURE
, 0,
5019 exp_feature_changed(hdev
, iso_socket_uuid
, val
, sk
);
5025 static const struct mgmt_exp_feature
{
5027 int (*set_func
)(struct sock
*sk
, struct hci_dev
*hdev
,
5028 struct mgmt_cp_set_exp_feature
*cp
, u16 data_len
);
5029 } exp_features
[] = {
5030 EXP_FEAT(ZERO_KEY
, set_zero_key_func
),
5031 #ifdef CONFIG_BT_FEATURE_DEBUG
5032 EXP_FEAT(debug_uuid
, set_debug_func
),
5034 EXP_FEAT(mgmt_mesh_uuid
, set_mgmt_mesh_func
),
5035 EXP_FEAT(rpa_resolution_uuid
, set_rpa_resolution_func
),
5036 EXP_FEAT(quality_report_uuid
, set_quality_report_func
),
5037 EXP_FEAT(offload_codecs_uuid
, set_offload_codec_func
),
5038 EXP_FEAT(le_simultaneous_roles_uuid
, set_le_simultaneous_roles_func
),
5040 EXP_FEAT(iso_socket_uuid
, set_iso_socket_func
),
5043 /* end with a null feature */
5044 EXP_FEAT(NULL
, NULL
)
5047 static int set_exp_feature(struct sock
*sk
, struct hci_dev
*hdev
,
5048 void *data
, u16 data_len
)
5050 struct mgmt_cp_set_exp_feature
*cp
= data
;
5053 bt_dev_dbg(hdev
, "sock %p", sk
);
5055 for (i
= 0; exp_features
[i
].uuid
; i
++) {
5056 if (!memcmp(cp
->uuid
, exp_features
[i
].uuid
, 16))
5057 return exp_features
[i
].set_func(sk
, hdev
, cp
, data_len
);
5060 return mgmt_cmd_status(sk
, hdev
? hdev
->id
: MGMT_INDEX_NONE
,
5061 MGMT_OP_SET_EXP_FEATURE
,
5062 MGMT_STATUS_NOT_SUPPORTED
);
5065 static u32
get_params_flags(struct hci_dev
*hdev
,
5066 struct hci_conn_params
*params
)
5068 u32 flags
= hdev
->conn_flags
;
5070 /* Devices using RPAs can only be programmed in the acceptlist if
5071 * LL Privacy has been enable otherwise they cannot mark
5072 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5074 if ((flags
& HCI_CONN_FLAG_REMOTE_WAKEUP
) && !use_ll_privacy(hdev
) &&
5075 hci_find_irk_by_addr(hdev
, ¶ms
->addr
, params
->addr_type
))
5076 flags
&= ~HCI_CONN_FLAG_REMOTE_WAKEUP
;
5081 static int get_device_flags(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5084 struct mgmt_cp_get_device_flags
*cp
= data
;
5085 struct mgmt_rp_get_device_flags rp
;
5086 struct bdaddr_list_with_flags
*br_params
;
5087 struct hci_conn_params
*params
;
5088 u32 supported_flags
;
5089 u32 current_flags
= 0;
5090 u8 status
= MGMT_STATUS_INVALID_PARAMS
;
5092 bt_dev_dbg(hdev
, "Get device flags %pMR (type 0x%x)\n",
5093 &cp
->addr
.bdaddr
, cp
->addr
.type
);
5097 supported_flags
= hdev
->conn_flags
;
5099 memset(&rp
, 0, sizeof(rp
));
5101 if (cp
->addr
.type
== BDADDR_BREDR
) {
5102 br_params
= hci_bdaddr_list_lookup_with_flags(&hdev
->accept_list
,
5108 current_flags
= br_params
->flags
;
5110 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
5111 le_addr_type(cp
->addr
.type
));
5115 supported_flags
= get_params_flags(hdev
, params
);
5116 current_flags
= params
->flags
;
5119 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5120 rp
.addr
.type
= cp
->addr
.type
;
5121 rp
.supported_flags
= cpu_to_le32(supported_flags
);
5122 rp
.current_flags
= cpu_to_le32(current_flags
);
5124 status
= MGMT_STATUS_SUCCESS
;
5127 hci_dev_unlock(hdev
);
5129 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_DEVICE_FLAGS
, status
,
5133 static void device_flags_changed(struct sock
*sk
, struct hci_dev
*hdev
,
5134 bdaddr_t
*bdaddr
, u8 bdaddr_type
,
5135 u32 supported_flags
, u32 current_flags
)
5137 struct mgmt_ev_device_flags_changed ev
;
5139 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5140 ev
.addr
.type
= bdaddr_type
;
5141 ev
.supported_flags
= cpu_to_le32(supported_flags
);
5142 ev
.current_flags
= cpu_to_le32(current_flags
);
5144 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED
, hdev
, &ev
, sizeof(ev
), sk
);
5147 static int set_device_flags(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5150 struct mgmt_cp_set_device_flags
*cp
= data
;
5151 struct bdaddr_list_with_flags
*br_params
;
5152 struct hci_conn_params
*params
;
5153 u8 status
= MGMT_STATUS_INVALID_PARAMS
;
5154 u32 supported_flags
;
5155 u32 current_flags
= __le32_to_cpu(cp
->current_flags
);
5157 bt_dev_dbg(hdev
, "Set device flags %pMR (type 0x%x) = 0x%x",
5158 &cp
->addr
.bdaddr
, cp
->addr
.type
, current_flags
);
5160 // We should take hci_dev_lock() early, I think.. conn_flags can change
5161 supported_flags
= hdev
->conn_flags
;
5163 if ((supported_flags
| current_flags
) != supported_flags
) {
5164 bt_dev_warn(hdev
, "Bad flag given (0x%x) vs supported (0x%0x)",
5165 current_flags
, supported_flags
);
5171 if (cp
->addr
.type
== BDADDR_BREDR
) {
5172 br_params
= hci_bdaddr_list_lookup_with_flags(&hdev
->accept_list
,
5177 br_params
->flags
= current_flags
;
5178 status
= MGMT_STATUS_SUCCESS
;
5180 bt_dev_warn(hdev
, "No such BR/EDR device %pMR (0x%x)",
5181 &cp
->addr
.bdaddr
, cp
->addr
.type
);
5187 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
5188 le_addr_type(cp
->addr
.type
));
5190 bt_dev_warn(hdev
, "No such LE device %pMR (0x%x)",
5191 &cp
->addr
.bdaddr
, le_addr_type(cp
->addr
.type
));
5195 supported_flags
= get_params_flags(hdev
, params
);
5197 if ((supported_flags
| current_flags
) != supported_flags
) {
5198 bt_dev_warn(hdev
, "Bad flag given (0x%x) vs supported (0x%0x)",
5199 current_flags
, supported_flags
);
5203 WRITE_ONCE(params
->flags
, current_flags
);
5204 status
= MGMT_STATUS_SUCCESS
;
5206 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5209 if (params
->flags
& HCI_CONN_FLAG_DEVICE_PRIVACY
)
5210 hci_update_passive_scan(hdev
);
5213 hci_dev_unlock(hdev
);
5216 if (status
== MGMT_STATUS_SUCCESS
)
5217 device_flags_changed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
,
5218 supported_flags
, current_flags
);
5220 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_FLAGS
, status
,
5221 &cp
->addr
, sizeof(cp
->addr
));
5224 static void mgmt_adv_monitor_added(struct sock
*sk
, struct hci_dev
*hdev
,
5227 struct mgmt_ev_adv_monitor_added ev
;
5229 ev
.monitor_handle
= cpu_to_le16(handle
);
5231 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5234 void mgmt_adv_monitor_removed(struct hci_dev
*hdev
, u16 handle
)
5236 struct mgmt_ev_adv_monitor_removed ev
;
5237 struct mgmt_pending_cmd
*cmd
;
5238 struct sock
*sk_skip
= NULL
;
5239 struct mgmt_cp_remove_adv_monitor
*cp
;
5241 cmd
= pending_find(MGMT_OP_REMOVE_ADV_MONITOR
, hdev
);
5245 if (cp
->monitor_handle
)
5249 ev
.monitor_handle
= cpu_to_le16(handle
);
5251 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED
, hdev
, &ev
, sizeof(ev
), sk_skip
);
5254 static int read_adv_mon_features(struct sock
*sk
, struct hci_dev
*hdev
,
5255 void *data
, u16 len
)
5257 struct adv_monitor
*monitor
= NULL
;
5258 struct mgmt_rp_read_adv_monitor_features
*rp
= NULL
;
5261 __u32 supported
= 0;
5263 __u16 num_handles
= 0;
5264 __u16 handles
[HCI_MAX_ADV_MONITOR_NUM_HANDLES
];
5266 BT_DBG("request for %s", hdev
->name
);
5270 if (msft_monitor_supported(hdev
))
5271 supported
|= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS
;
5273 idr_for_each_entry(&hdev
->adv_monitors_idr
, monitor
, handle
)
5274 handles
[num_handles
++] = monitor
->handle
;
5276 hci_dev_unlock(hdev
);
5278 rp_size
= sizeof(*rp
) + (num_handles
* sizeof(u16
));
5279 rp
= kmalloc(rp_size
, GFP_KERNEL
);
5283 /* All supported features are currently enabled */
5284 enabled
= supported
;
5286 rp
->supported_features
= cpu_to_le32(supported
);
5287 rp
->enabled_features
= cpu_to_le32(enabled
);
5288 rp
->max_num_handles
= cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES
);
5289 rp
->max_num_patterns
= HCI_MAX_ADV_MONITOR_NUM_PATTERNS
;
5290 rp
->num_handles
= cpu_to_le16(num_handles
);
5292 memcpy(&rp
->handles
, &handles
, (num_handles
* sizeof(u16
)));
5294 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5295 MGMT_OP_READ_ADV_MONITOR_FEATURES
,
5296 MGMT_STATUS_SUCCESS
, rp
, rp_size
);
5303 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev
*hdev
,
5304 void *data
, int status
)
5306 struct mgmt_rp_add_adv_patterns_monitor rp
;
5307 struct mgmt_pending_cmd
*cmd
= data
;
5308 struct adv_monitor
*monitor
= cmd
->user_data
;
5312 rp
.monitor_handle
= cpu_to_le16(monitor
->handle
);
5315 mgmt_adv_monitor_added(cmd
->sk
, hdev
, monitor
->handle
);
5316 hdev
->adv_monitors_cnt
++;
5317 if (monitor
->state
== ADV_MONITOR_STATE_NOT_REGISTERED
)
5318 monitor
->state
= ADV_MONITOR_STATE_REGISTERED
;
5319 hci_update_passive_scan(hdev
);
5322 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5323 mgmt_status(status
), &rp
, sizeof(rp
));
5324 mgmt_pending_remove(cmd
);
5326 hci_dev_unlock(hdev
);
5327 bt_dev_dbg(hdev
, "add monitor %d complete, status %d",
5328 rp
.monitor_handle
, status
);
5331 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev
*hdev
, void *data
)
/* hci_cmd_sync work callback for the Add Adv Patterns Monitor
 * commands: 'data' is the pending mgmt command, whose user_data
 * carries the adv_monitor built by the command handler.  Returns the
 * result of hci_add_adv_monitor() (delivered to
 * mgmt_add_adv_patterns_monitor_complete() as 'status').
 * NOTE(review): lossy extraction — the function's brace lines
 * (original lines 5332/5337) are missing from this chunk.
 */
5333 struct mgmt_pending_cmd
*cmd
= data
;
5334 struct adv_monitor
*monitor
= cmd
->user_data
;
5336 return hci_add_adv_monitor(hdev
, monitor
);
5339 static int __add_adv_patterns_monitor(struct sock
*sk
, struct hci_dev
*hdev
,
5340 struct adv_monitor
*m
, u8 status
,
5341 void *data
, u16 len
, u16 op
)
5343 struct mgmt_pending_cmd
*cmd
;
5351 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
5352 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR
, hdev
) ||
5353 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI
, hdev
) ||
5354 pending_find(MGMT_OP_REMOVE_ADV_MONITOR
, hdev
)) {
5355 status
= MGMT_STATUS_BUSY
;
5359 cmd
= mgmt_pending_add(sk
, op
, hdev
, data
, len
);
5361 status
= MGMT_STATUS_NO_RESOURCES
;
5366 err
= hci_cmd_sync_queue(hdev
, mgmt_add_adv_patterns_monitor_sync
, cmd
,
5367 mgmt_add_adv_patterns_monitor_complete
);
5370 status
= MGMT_STATUS_NO_RESOURCES
;
5372 status
= MGMT_STATUS_FAILED
;
5377 hci_dev_unlock(hdev
);
5382 hci_free_adv_monitor(hdev
, m
);
5383 hci_dev_unlock(hdev
);
5384 return mgmt_cmd_status(sk
, hdev
->id
, op
, status
);
5387 static void parse_adv_monitor_rssi(struct adv_monitor
*m
,
5388 struct mgmt_adv_rssi_thresholds
*rssi
)
/* Copy the RSSI threshold parameters from the mgmt command payload
 * into the monitor, converting the 16-bit timeouts from little-endian
 * wire order to CPU order.  When no thresholds were supplied the
 * defaults below are used instead.
 * NOTE(review): lossy extraction — the "if (rssi) { ... } else {"
 * frame (original lines 5389-5390 and 5398) is missing from this
 * chunk; the first five assignments are the rssi!=NULL branch and the
 * block after the comment is the default branch.
 */
5391 m
->rssi
.low_threshold
= rssi
->low_threshold
;
5392 m
->rssi
.low_threshold_timeout
=
5393 __le16_to_cpu(rssi
->low_threshold_timeout
);
5394 m
->rssi
.high_threshold
= rssi
->high_threshold
;
5395 m
->rssi
.high_threshold_timeout
=
5396 __le16_to_cpu(rssi
->high_threshold_timeout
);
5397 m
->rssi
.sampling_period
= rssi
->sampling_period
;
5399 /* Default values. These numbers are the least constricting
5400 * parameters for MSFT API to work, so it behaves as if there
5401 * are no rssi parameter to consider. May need to be changed
5402 * if other API are to be supported.
/* Defaults (rssi == NULL branch): -127 dBm thresholds, i.e. "always
 * in range", with the timeouts/sampling values the MSFT extension
 * treats as no-op. */
5404 m
->rssi
.low_threshold
= -127;
5405 m
->rssi
.low_threshold_timeout
= 60;
5406 m
->rssi
.high_threshold
= -127;
5407 m
->rssi
.high_threshold_timeout
= 0;
5408 m
->rssi
.sampling_period
= 0;
5412 static u8
parse_adv_monitor_pattern(struct adv_monitor
*m
, u8 pattern_count
,
5413 struct mgmt_adv_pattern
*patterns
)
5415 u8 offset
= 0, length
= 0;
5416 struct adv_pattern
*p
= NULL
;
5419 for (i
= 0; i
< pattern_count
; i
++) {
5420 offset
= patterns
[i
].offset
;
5421 length
= patterns
[i
].length
;
5422 if (offset
>= HCI_MAX_EXT_AD_LENGTH
||
5423 length
> HCI_MAX_EXT_AD_LENGTH
||
5424 (offset
+ length
) > HCI_MAX_EXT_AD_LENGTH
)
5425 return MGMT_STATUS_INVALID_PARAMS
;
5427 p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
5429 return MGMT_STATUS_NO_RESOURCES
;
5431 p
->ad_type
= patterns
[i
].ad_type
;
5432 p
->offset
= patterns
[i
].offset
;
5433 p
->length
= patterns
[i
].length
;
5434 memcpy(p
->value
, patterns
[i
].value
, p
->length
);
5436 INIT_LIST_HEAD(&p
->list
);
5437 list_add(&p
->list
, &m
->patterns
);
5440 return MGMT_STATUS_SUCCESS
;
5443 static int add_adv_patterns_monitor(struct sock
*sk
, struct hci_dev
*hdev
,
5444 void *data
, u16 len
)
5446 struct mgmt_cp_add_adv_patterns_monitor
*cp
= data
;
5447 struct adv_monitor
*m
= NULL
;
5448 u8 status
= MGMT_STATUS_SUCCESS
;
5449 size_t expected_size
= sizeof(*cp
);
5451 BT_DBG("request for %s", hdev
->name
);
5453 if (len
<= sizeof(*cp
)) {
5454 status
= MGMT_STATUS_INVALID_PARAMS
;
5458 expected_size
+= cp
->pattern_count
* sizeof(struct mgmt_adv_pattern
);
5459 if (len
!= expected_size
) {
5460 status
= MGMT_STATUS_INVALID_PARAMS
;
5464 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
5466 status
= MGMT_STATUS_NO_RESOURCES
;
5470 INIT_LIST_HEAD(&m
->patterns
);
5472 parse_adv_monitor_rssi(m
, NULL
);
5473 status
= parse_adv_monitor_pattern(m
, cp
->pattern_count
, cp
->patterns
);
5476 return __add_adv_patterns_monitor(sk
, hdev
, m
, status
, data
, len
,
5477 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
);
5480 static int add_adv_patterns_monitor_rssi(struct sock
*sk
, struct hci_dev
*hdev
,
5481 void *data
, u16 len
)
5483 struct mgmt_cp_add_adv_patterns_monitor_rssi
*cp
= data
;
5484 struct adv_monitor
*m
= NULL
;
5485 u8 status
= MGMT_STATUS_SUCCESS
;
5486 size_t expected_size
= sizeof(*cp
);
5488 BT_DBG("request for %s", hdev
->name
);
5490 if (len
<= sizeof(*cp
)) {
5491 status
= MGMT_STATUS_INVALID_PARAMS
;
5495 expected_size
+= cp
->pattern_count
* sizeof(struct mgmt_adv_pattern
);
5496 if (len
!= expected_size
) {
5497 status
= MGMT_STATUS_INVALID_PARAMS
;
5501 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
5503 status
= MGMT_STATUS_NO_RESOURCES
;
5507 INIT_LIST_HEAD(&m
->patterns
);
5509 parse_adv_monitor_rssi(m
, &cp
->rssi
);
5510 status
= parse_adv_monitor_pattern(m
, cp
->pattern_count
, cp
->patterns
);
5513 return __add_adv_patterns_monitor(sk
, hdev
, m
, status
, data
, len
,
5514 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI
);
5517 static void mgmt_remove_adv_monitor_complete(struct hci_dev
*hdev
,
5518 void *data
, int status
)
5520 struct mgmt_rp_remove_adv_monitor rp
;
5521 struct mgmt_pending_cmd
*cmd
= data
;
5522 struct mgmt_cp_remove_adv_monitor
*cp
= cmd
->param
;
5526 rp
.monitor_handle
= cp
->monitor_handle
;
5529 hci_update_passive_scan(hdev
);
5531 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5532 mgmt_status(status
), &rp
, sizeof(rp
));
5533 mgmt_pending_remove(cmd
);
5535 hci_dev_unlock(hdev
);
5536 bt_dev_dbg(hdev
, "remove monitor %d complete, status %d",
5537 rp
.monitor_handle
, status
);
5540 static int mgmt_remove_adv_monitor_sync(struct hci_dev
*hdev
, void *data
)
/* hci_cmd_sync work callback for MGMT_OP_REMOVE_ADV_MONITOR: reads
 * the requested monitor handle from the pending command's parameters
 * (wire order -> CPU order) and removes either every monitor or just
 * the named one.
 * NOTE(review): lossy extraction — the guard selecting between the
 * two returns (original line 5546, presumably "if (!handle)") is
 * missing from this chunk; handle 0 means "remove all".
 */
5542 struct mgmt_pending_cmd
*cmd
= data
;
5543 struct mgmt_cp_remove_adv_monitor
*cp
= cmd
->param
;
5544 u16 handle
= __le16_to_cpu(cp
->monitor_handle
);
5547 return hci_remove_all_adv_monitor(hdev
);
5549 return hci_remove_single_adv_monitor(hdev
, handle
);
5552 static int remove_adv_monitor(struct sock
*sk
, struct hci_dev
*hdev
,
5553 void *data
, u16 len
)
5555 struct mgmt_pending_cmd
*cmd
;
5560 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
5561 pending_find(MGMT_OP_REMOVE_ADV_MONITOR
, hdev
) ||
5562 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR
, hdev
) ||
5563 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI
, hdev
)) {
5564 status
= MGMT_STATUS_BUSY
;
5568 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADV_MONITOR
, hdev
, data
, len
);
5570 status
= MGMT_STATUS_NO_RESOURCES
;
5574 err
= hci_cmd_sync_submit(hdev
, mgmt_remove_adv_monitor_sync
, cmd
,
5575 mgmt_remove_adv_monitor_complete
);
5578 mgmt_pending_remove(cmd
);
5581 status
= MGMT_STATUS_NO_RESOURCES
;
5583 status
= MGMT_STATUS_FAILED
;
5588 hci_dev_unlock(hdev
);
5593 hci_dev_unlock(hdev
);
5594 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADV_MONITOR
,
5598 static void read_local_oob_data_complete(struct hci_dev
*hdev
, void *data
, int err
)
5600 struct mgmt_rp_read_local_oob_data mgmt_rp
;
5601 size_t rp_size
= sizeof(mgmt_rp
);
5602 struct mgmt_pending_cmd
*cmd
= data
;
5603 struct sk_buff
*skb
= cmd
->skb
;
5604 u8 status
= mgmt_status(err
);
5608 status
= MGMT_STATUS_FAILED
;
5609 else if (IS_ERR(skb
))
5610 status
= mgmt_status(PTR_ERR(skb
));
5612 status
= mgmt_status(skb
->data
[0]);
5615 bt_dev_dbg(hdev
, "status %d", status
);
5618 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
, status
);
5622 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
5624 if (!bredr_sc_enabled(hdev
)) {
5625 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
5627 if (skb
->len
< sizeof(*rp
)) {
5628 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
5629 MGMT_OP_READ_LOCAL_OOB_DATA
,
5630 MGMT_STATUS_FAILED
);
5634 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
5635 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
5637 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
5639 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
5641 if (skb
->len
< sizeof(*rp
)) {
5642 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
5643 MGMT_OP_READ_LOCAL_OOB_DATA
,
5644 MGMT_STATUS_FAILED
);
5648 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
5649 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
5651 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
5652 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
5655 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
5656 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
5659 if (skb
&& !IS_ERR(skb
))
5662 mgmt_pending_free(cmd
);
5665 static int read_local_oob_data_sync(struct hci_dev
*hdev
, void *data
)
/* hci_cmd_sync work callback for MGMT_OP_READ_LOCAL_OOB_DATA: issues
 * the (extended, when BR/EDR Secure Connections is enabled) Read
 * Local OOB Data HCI command and stores the reply skb on the pending
 * command for read_local_oob_data_complete() to parse.
 * NOTE(review): lossy extraction — the "else" between the two
 * assignments (original line 5671) and the trailing "return 0"
 * (original line 5677) are missing from this chunk.
 */
5667 struct mgmt_pending_cmd
*cmd
= data
;
/* true => request the extended (P-192 + P-256) OOB data variant. */
5669 if (bredr_sc_enabled(hdev
))
5670 cmd
->skb
= hci_read_local_oob_data_sync(hdev
, true, cmd
->sk
)
;
5672 cmd
->skb
= hci_read_local_oob_data_sync(hdev
, false, cmd
->sk
);
/* Propagate a synchronous failure as a negative errno. */
5674 if (IS_ERR(cmd
->skb
))
5675 return PTR_ERR(cmd
->skb
);
5680 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
5681 void *data
, u16 data_len
)
5683 struct mgmt_pending_cmd
*cmd
;
5686 bt_dev_dbg(hdev
, "sock %p", sk
);
5690 if (!hdev_is_powered(hdev
)) {
5691 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
5692 MGMT_STATUS_NOT_POWERED
);
5696 if (!lmp_ssp_capable(hdev
)) {
5697 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
5698 MGMT_STATUS_NOT_SUPPORTED
);
5702 cmd
= mgmt_pending_new(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
5706 err
= hci_cmd_sync_queue(hdev
, read_local_oob_data_sync
, cmd
,
5707 read_local_oob_data_complete
);
5710 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
5711 MGMT_STATUS_FAILED
);
5714 mgmt_pending_free(cmd
);
5718 hci_dev_unlock(hdev
);
5722 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
5723 void *data
, u16 len
)
5725 struct mgmt_addr_info
*addr
= data
;
5728 bt_dev_dbg(hdev
, "sock %p", sk
);
5730 if (!bdaddr_type_is_valid(addr
->type
))
5731 return mgmt_cmd_complete(sk
, hdev
->id
,
5732 MGMT_OP_ADD_REMOTE_OOB_DATA
,
5733 MGMT_STATUS_INVALID_PARAMS
,
5734 addr
, sizeof(*addr
));
5738 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
5739 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
5742 if (cp
->addr
.type
!= BDADDR_BREDR
) {
5743 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5744 MGMT_OP_ADD_REMOTE_OOB_DATA
,
5745 MGMT_STATUS_INVALID_PARAMS
,
5746 &cp
->addr
, sizeof(cp
->addr
));
5750 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
5751 cp
->addr
.type
, cp
->hash
,
5752 cp
->rand
, NULL
, NULL
);
5754 status
= MGMT_STATUS_FAILED
;
5756 status
= MGMT_STATUS_SUCCESS
;
5758 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5759 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
5760 &cp
->addr
, sizeof(cp
->addr
));
5761 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
5762 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
5763 u8
*rand192
, *hash192
, *rand256
, *hash256
;
5766 if (bdaddr_type_is_le(cp
->addr
.type
)) {
5767 /* Enforce zero-valued 192-bit parameters as
5768 * long as legacy SMP OOB isn't implemented.
5770 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
5771 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
5772 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5773 MGMT_OP_ADD_REMOTE_OOB_DATA
,
5774 MGMT_STATUS_INVALID_PARAMS
,
5775 addr
, sizeof(*addr
));
5782 /* In case one of the P-192 values is set to zero,
5783 * then just disable OOB data for P-192.
5785 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
5786 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
5790 rand192
= cp
->rand192
;
5791 hash192
= cp
->hash192
;
5795 /* In case one of the P-256 values is set to zero, then just
5796 * disable OOB data for P-256.
5798 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
5799 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
5803 rand256
= cp
->rand256
;
5804 hash256
= cp
->hash256
;
5807 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
5808 cp
->addr
.type
, hash192
, rand192
,
5811 status
= MGMT_STATUS_FAILED
;
5813 status
= MGMT_STATUS_SUCCESS
;
5815 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5816 MGMT_OP_ADD_REMOTE_OOB_DATA
,
5817 status
, &cp
->addr
, sizeof(cp
->addr
));
5819 bt_dev_err(hdev
, "add_remote_oob_data: invalid len of %u bytes",
5821 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
5822 MGMT_STATUS_INVALID_PARAMS
);
5826 hci_dev_unlock(hdev
);
5830 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
5831 void *data
, u16 len
)
5833 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
5837 bt_dev_dbg(hdev
, "sock %p", sk
);
5839 if (cp
->addr
.type
!= BDADDR_BREDR
)
5840 return mgmt_cmd_complete(sk
, hdev
->id
,
5841 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
5842 MGMT_STATUS_INVALID_PARAMS
,
5843 &cp
->addr
, sizeof(cp
->addr
));
5847 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5848 hci_remote_oob_data_clear(hdev
);
5849 status
= MGMT_STATUS_SUCCESS
;
5853 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
5855 status
= MGMT_STATUS_INVALID_PARAMS
;
5857 status
= MGMT_STATUS_SUCCESS
;
5860 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
5861 status
, &cp
->addr
, sizeof(cp
->addr
));
5863 hci_dev_unlock(hdev
);
5867 void mgmt_start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
5869 struct mgmt_pending_cmd
*cmd
;
5871 bt_dev_dbg(hdev
, "status %u", status
);
5875 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
5877 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
5880 cmd
= pending_find(MGMT_OP_START_LIMITED_DISCOVERY
, hdev
);
5883 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5884 mgmt_pending_remove(cmd
);
5887 hci_dev_unlock(hdev
);
5890 static bool discovery_type_is_valid(struct hci_dev
*hdev
, uint8_t type
,
5891 uint8_t *mgmt_status
)
5894 case DISCOV_TYPE_LE
:
5895 *mgmt_status
= mgmt_le_support(hdev
);
5899 case DISCOV_TYPE_INTERLEAVED
:
5900 *mgmt_status
= mgmt_le_support(hdev
);
5904 case DISCOV_TYPE_BREDR
:
5905 *mgmt_status
= mgmt_bredr_support(hdev
);
5910 *mgmt_status
= MGMT_STATUS_INVALID_PARAMS
;
5917 static void start_discovery_complete(struct hci_dev
*hdev
, void *data
, int err
)
5919 struct mgmt_pending_cmd
*cmd
= data
;
5921 bt_dev_dbg(hdev
, "err %d", err
);
5923 if (err
== -ECANCELED
)
5926 if (cmd
!= pending_find(MGMT_OP_START_DISCOVERY
, hdev
) &&
5927 cmd
!= pending_find(MGMT_OP_START_LIMITED_DISCOVERY
, hdev
) &&
5928 cmd
!= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
))
5931 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(err
),
5933 mgmt_pending_remove(cmd
);
5935 hci_discovery_set_state(hdev
, err
? DISCOVERY_STOPPED
:
5939 static int start_discovery_sync(struct hci_dev
*hdev
, void *data
)
/* hci_cmd_sync work callback shared by the Start Discovery command
 * family; 'data' (the pending cmd) is unused — the discovery
 * parameters were already written into hdev->discovery by the
 * command handler.  Thin wrapper around hci_start_discovery_sync(). */
5941 return hci_start_discovery_sync(hdev
);
5944 static int start_discovery_internal(struct sock
*sk
, struct hci_dev
*hdev
,
5945 u16 op
, void *data
, u16 len
)
5947 struct mgmt_cp_start_discovery
*cp
= data
;
5948 struct mgmt_pending_cmd
*cmd
;
5952 bt_dev_dbg(hdev
, "sock %p", sk
);
5956 if (!hdev_is_powered(hdev
)) {
5957 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
,
5958 MGMT_STATUS_NOT_POWERED
,
5959 &cp
->type
, sizeof(cp
->type
));
5963 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
5964 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
5965 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, MGMT_STATUS_BUSY
,
5966 &cp
->type
, sizeof(cp
->type
));
5970 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
5971 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, status
,
5972 &cp
->type
, sizeof(cp
->type
));
5976 /* Can't start discovery when it is paused */
5977 if (hdev
->discovery_paused
) {
5978 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, MGMT_STATUS_BUSY
,
5979 &cp
->type
, sizeof(cp
->type
));
5983 /* Clear the discovery filter first to free any previously
5984 * allocated memory for the UUID list.
5986 hci_discovery_filter_clear(hdev
);
5988 hdev
->discovery
.type
= cp
->type
;
5989 hdev
->discovery
.report_invalid_rssi
= false;
5990 if (op
== MGMT_OP_START_LIMITED_DISCOVERY
)
5991 hdev
->discovery
.limited
= true;
5993 hdev
->discovery
.limited
= false;
5995 cmd
= mgmt_pending_add(sk
, op
, hdev
, data
, len
);
6001 err
= hci_cmd_sync_queue(hdev
, start_discovery_sync
, cmd
,
6002 start_discovery_complete
);
6004 mgmt_pending_remove(cmd
);
6008 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
6011 hci_dev_unlock(hdev
);
6015 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
6016 void *data
, u16 len
)
6018 return start_discovery_internal(sk
, hdev
, MGMT_OP_START_DISCOVERY
,
6022 static int start_limited_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
6023 void *data
, u16 len
)
6025 return start_discovery_internal(sk
, hdev
,
6026 MGMT_OP_START_LIMITED_DISCOVERY
,
6030 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
6031 void *data
, u16 len
)
6033 struct mgmt_cp_start_service_discovery
*cp
= data
;
6034 struct mgmt_pending_cmd
*cmd
;
6035 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
6036 u16 uuid_count
, expected_len
;
6040 bt_dev_dbg(hdev
, "sock %p", sk
);
6044 if (!hdev_is_powered(hdev
)) {
6045 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6046 MGMT_OP_START_SERVICE_DISCOVERY
,
6047 MGMT_STATUS_NOT_POWERED
,
6048 &cp
->type
, sizeof(cp
->type
));
6052 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
6053 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
6054 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6055 MGMT_OP_START_SERVICE_DISCOVERY
,
6056 MGMT_STATUS_BUSY
, &cp
->type
,
6061 if (hdev
->discovery_paused
) {
6062 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6063 MGMT_OP_START_SERVICE_DISCOVERY
,
6064 MGMT_STATUS_BUSY
, &cp
->type
,
6069 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
6070 if (uuid_count
> max_uuid_count
) {
6071 bt_dev_err(hdev
, "service_discovery: too big uuid_count value %u",
6073 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6074 MGMT_OP_START_SERVICE_DISCOVERY
,
6075 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
6080 expected_len
= sizeof(*cp
) + uuid_count
* 16;
6081 if (expected_len
!= len
) {
6082 bt_dev_err(hdev
, "service_discovery: expected %u bytes, got %u bytes",
6084 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6085 MGMT_OP_START_SERVICE_DISCOVERY
,
6086 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
6091 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
6092 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6093 MGMT_OP_START_SERVICE_DISCOVERY
,
6094 status
, &cp
->type
, sizeof(cp
->type
));
6098 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
6105 /* Clear the discovery filter first to free any previously
6106 * allocated memory for the UUID list.
6108 hci_discovery_filter_clear(hdev
);
6110 hdev
->discovery
.result_filtering
= true;
6111 hdev
->discovery
.type
= cp
->type
;
6112 hdev
->discovery
.rssi
= cp
->rssi
;
6113 hdev
->discovery
.uuid_count
= uuid_count
;
6115 if (uuid_count
> 0) {
6116 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
6118 if (!hdev
->discovery
.uuids
) {
6119 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6120 MGMT_OP_START_SERVICE_DISCOVERY
,
6122 &cp
->type
, sizeof(cp
->type
));
6123 mgmt_pending_remove(cmd
);
6128 err
= hci_cmd_sync_queue(hdev
, start_discovery_sync
, cmd
,
6129 start_discovery_complete
);
6131 mgmt_pending_remove(cmd
);
6135 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
6138 hci_dev_unlock(hdev
);
6142 void mgmt_stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
6144 struct mgmt_pending_cmd
*cmd
;
6146 bt_dev_dbg(hdev
, "status %u", status
);
6150 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
6152 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6153 mgmt_pending_remove(cmd
);
6156 hci_dev_unlock(hdev
);
6159 static void stop_discovery_complete(struct hci_dev
*hdev
, void *data
, int err
)
6161 struct mgmt_pending_cmd
*cmd
= data
;
6163 if (err
== -ECANCELED
||
6164 cmd
!= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
))
6167 bt_dev_dbg(hdev
, "err %d", err
);
6169 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(err
),
6171 mgmt_pending_remove(cmd
);
6174 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
6177 static int stop_discovery_sync(struct hci_dev
*hdev
, void *data
)
/* hci_cmd_sync work callback for MGMT_OP_STOP_DISCOVERY; 'data' (the
 * pending cmd) is unused.  Thin wrapper around
 * hci_stop_discovery_sync(). */
6179 return hci_stop_discovery_sync(hdev
);
6182 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6185 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
6186 struct mgmt_pending_cmd
*cmd
;
6189 bt_dev_dbg(hdev
, "sock %p", sk
);
6193 if (!hci_discovery_active(hdev
)) {
6194 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
6195 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
6196 sizeof(mgmt_cp
->type
));
6200 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
6201 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
6202 MGMT_STATUS_INVALID_PARAMS
,
6203 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
6207 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
6213 err
= hci_cmd_sync_queue(hdev
, stop_discovery_sync
, cmd
,
6214 stop_discovery_complete
);
6216 mgmt_pending_remove(cmd
);
6220 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
6223 hci_dev_unlock(hdev
);
6227 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6230 struct mgmt_cp_confirm_name
*cp
= data
;
6231 struct inquiry_entry
*e
;
6234 bt_dev_dbg(hdev
, "sock %p", sk
);
6238 if (!hci_discovery_active(hdev
)) {
6239 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
6240 MGMT_STATUS_FAILED
, &cp
->addr
,
6245 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
6247 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
6248 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
6253 if (cp
->name_known
) {
6254 e
->name_state
= NAME_KNOWN
;
6257 e
->name_state
= NAME_NEEDED
;
6258 hci_inquiry_cache_update_resolve(hdev
, e
);
6261 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
6262 &cp
->addr
, sizeof(cp
->addr
));
6265 hci_dev_unlock(hdev
);
6269 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6272 struct mgmt_cp_block_device
*cp
= data
;
6276 bt_dev_dbg(hdev
, "sock %p", sk
);
6278 if (!bdaddr_type_is_valid(cp
->addr
.type
))
6279 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
6280 MGMT_STATUS_INVALID_PARAMS
,
6281 &cp
->addr
, sizeof(cp
->addr
));
6285 err
= hci_bdaddr_list_add(&hdev
->reject_list
, &cp
->addr
.bdaddr
,
6288 status
= MGMT_STATUS_FAILED
;
6292 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
6294 status
= MGMT_STATUS_SUCCESS
;
6297 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
6298 &cp
->addr
, sizeof(cp
->addr
));
6300 hci_dev_unlock(hdev
);
6305 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6308 struct mgmt_cp_unblock_device
*cp
= data
;
6312 bt_dev_dbg(hdev
, "sock %p", sk
);
6314 if (!bdaddr_type_is_valid(cp
->addr
.type
))
6315 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
6316 MGMT_STATUS_INVALID_PARAMS
,
6317 &cp
->addr
, sizeof(cp
->addr
));
6321 err
= hci_bdaddr_list_del(&hdev
->reject_list
, &cp
->addr
.bdaddr
,
6324 status
= MGMT_STATUS_INVALID_PARAMS
;
6328 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
6330 status
= MGMT_STATUS_SUCCESS
;
6333 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
6334 &cp
->addr
, sizeof(cp
->addr
));
6336 hci_dev_unlock(hdev
);
6341 static int set_device_id_sync(struct hci_dev
*hdev
, void *data
)
/* hci_cmd_sync work callback queued by set_device_id(): regenerates
 * the EIR data so it advertises the newly configured Device ID
 * record; 'data' is unused (queued with NULL). */
6343 return hci_update_eir_sync(hdev
);
6346 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6349 struct mgmt_cp_set_device_id
*cp
= data
;
6353 bt_dev_dbg(hdev
, "sock %p", sk
);
6355 source
= __le16_to_cpu(cp
->source
);
6357 if (source
> 0x0002)
6358 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
6359 MGMT_STATUS_INVALID_PARAMS
);
6363 hdev
->devid_source
= source
;
6364 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
6365 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
6366 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
6368 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
6371 hci_cmd_sync_queue(hdev
, set_device_id_sync
, NULL
, NULL
);
6373 hci_dev_unlock(hdev
);
6378 static void enable_advertising_instance(struct hci_dev
*hdev
, int err
)
/* Log the outcome of re-enabling an advertising instance after the
 * Set Advertising setting was toggled: error level on failure, debug
 * level otherwise.
 * NOTE(review): lossy extraction — the "if (err)" / "else" frame
 * around the two log calls (original lines 6379-6382) is missing
 * from this chunk.
 */
6381 bt_dev_err(hdev
, "failed to re-configure advertising %d", err
);
6383 bt_dev_dbg(hdev
, "status %d", err
);
6386 static void set_advertising_complete(struct hci_dev
*hdev
, void *data
, int err
)
6388 struct cmd_lookup match
= { NULL
, hdev
};
6390 struct adv_info
*adv_instance
;
6391 u8 status
= mgmt_status(err
);
6394 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
6395 cmd_status_rsp
, &status
);
6399 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
6400 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
6402 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
6404 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
6407 new_settings(hdev
, match
.sk
);
6412 /* If "Set Advertising" was just disabled and instance advertising was
6413 * set up earlier, then re-enable multi-instance advertising.
6415 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
6416 list_empty(&hdev
->adv_instances
))
6419 instance
= hdev
->cur_adv_instance
;
6421 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
6422 struct adv_info
, list
);
6426 instance
= adv_instance
->instance
;
6429 err
= hci_schedule_adv_instance_sync(hdev
, instance
, true);
6431 enable_advertising_instance(hdev
, err
);
6434 static int set_adv_sync(struct hci_dev
*hdev
, void *data
)
6436 struct mgmt_pending_cmd
*cmd
= data
;
6437 struct mgmt_mode
*cp
= cmd
->param
;
6440 if (cp
->val
== 0x02)
6441 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
6443 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
6445 cancel_adv_timeout(hdev
);
6448 /* Switch to instance "0" for the Set Advertising setting.
6449 * We cannot use update_[adv|scan_rsp]_data() here as the
6450 * HCI_ADVERTISING flag is not yet set.
6452 hdev
->cur_adv_instance
= 0x00;
6454 if (ext_adv_capable(hdev
)) {
6455 hci_start_ext_adv_sync(hdev
, 0x00);
6457 hci_update_adv_data_sync(hdev
, 0x00);
6458 hci_update_scan_rsp_data_sync(hdev
, 0x00);
6459 hci_enable_advertising_sync(hdev
);
6462 hci_disable_advertising_sync(hdev
);
6468 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6471 struct mgmt_mode
*cp
= data
;
6472 struct mgmt_pending_cmd
*cmd
;
6476 bt_dev_dbg(hdev
, "sock %p", sk
);
6478 status
= mgmt_le_support(hdev
);
6480 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
6483 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
6484 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
6485 MGMT_STATUS_INVALID_PARAMS
);
6487 if (hdev
->advertising_paused
)
6488 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
6495 /* The following conditions are ones which mean that we should
6496 * not do any HCI communication but directly send a mgmt
6497 * response to user space (after toggling the flag if
6500 if (!hdev_is_powered(hdev
) ||
6501 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
6502 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
6503 hci_dev_test_flag(hdev
, HCI_MESH
) ||
6504 hci_conn_num(hdev
, LE_LINK
) > 0 ||
6505 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
6506 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
6510 hdev
->cur_adv_instance
= 0x00;
6511 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
6512 if (cp
->val
== 0x02)
6513 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
6515 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
6517 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
6518 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
6521 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
6526 err
= new_settings(hdev
, sk
);
6531 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
6532 pending_find(MGMT_OP_SET_LE
, hdev
)) {
6533 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
6538 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
6542 err
= hci_cmd_sync_queue(hdev
, set_adv_sync
, cmd
,
6543 set_advertising_complete
);
6546 mgmt_pending_remove(cmd
);
6549 hci_dev_unlock(hdev
);
6553 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
6554 void *data
, u16 len
)
6556 struct mgmt_cp_set_static_address
*cp
= data
;
6559 bt_dev_dbg(hdev
, "sock %p", sk
);
6561 if (!lmp_le_capable(hdev
))
6562 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
6563 MGMT_STATUS_NOT_SUPPORTED
);
6565 if (hdev_is_powered(hdev
))
6566 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
6567 MGMT_STATUS_REJECTED
);
6569 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
6570 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
6571 return mgmt_cmd_status(sk
, hdev
->id
,
6572 MGMT_OP_SET_STATIC_ADDRESS
,
6573 MGMT_STATUS_INVALID_PARAMS
);
6575 /* Two most significant bits shall be set */
6576 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6577 return mgmt_cmd_status(sk
, hdev
->id
,
6578 MGMT_OP_SET_STATIC_ADDRESS
,
6579 MGMT_STATUS_INVALID_PARAMS
);
6584 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
6586 err
= send_settings_rsp(sk
, MGMT_OP_SET_STATIC_ADDRESS
, hdev
);
6590 err
= new_settings(hdev
, sk
);
6593 hci_dev_unlock(hdev
);
6597 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
6598 void *data
, u16 len
)
6600 struct mgmt_cp_set_scan_params
*cp
= data
;
6601 __u16 interval
, window
;
6604 bt_dev_dbg(hdev
, "sock %p", sk
);
6606 if (!lmp_le_capable(hdev
))
6607 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
6608 MGMT_STATUS_NOT_SUPPORTED
);
6610 interval
= __le16_to_cpu(cp
->interval
);
6612 if (interval
< 0x0004 || interval
> 0x4000)
6613 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
6614 MGMT_STATUS_INVALID_PARAMS
);
6616 window
= __le16_to_cpu(cp
->window
);
6618 if (window
< 0x0004 || window
> 0x4000)
6619 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
6620 MGMT_STATUS_INVALID_PARAMS
);
6622 if (window
> interval
)
6623 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
6624 MGMT_STATUS_INVALID_PARAMS
);
6628 hdev
->le_scan_interval
= interval
;
6629 hdev
->le_scan_window
= window
;
6631 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0,
6634 /* If background scan is running, restart it so new parameters are
6637 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
6638 hdev
->discovery
.state
== DISCOVERY_STOPPED
)
6639 hci_update_passive_scan(hdev
);
6641 hci_dev_unlock(hdev
);
6646 static void fast_connectable_complete(struct hci_dev
*hdev
, void *data
, int err
)
6648 struct mgmt_pending_cmd
*cmd
= data
;
6650 bt_dev_dbg(hdev
, "err %d", err
);
6653 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
6656 struct mgmt_mode
*cp
= cmd
->param
;
6659 hci_dev_set_flag(hdev
, HCI_FAST_CONNECTABLE
);
6661 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
6663 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
6664 new_settings(hdev
, cmd
->sk
);
6667 mgmt_pending_free(cmd
);
6670 static int write_fast_connectable_sync(struct hci_dev
*hdev
, void *data
)
6672 struct mgmt_pending_cmd
*cmd
= data
;
6673 struct mgmt_mode
*cp
= cmd
->param
;
6675 return hci_write_fast_connectable_sync(hdev
, cp
->val
);
6678 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
6679 void *data
, u16 len
)
6681 struct mgmt_mode
*cp
= data
;
6682 struct mgmt_pending_cmd
*cmd
;
6685 bt_dev_dbg(hdev
, "sock %p", sk
);
6687 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
6688 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
6689 return mgmt_cmd_status(sk
, hdev
->id
,
6690 MGMT_OP_SET_FAST_CONNECTABLE
,
6691 MGMT_STATUS_NOT_SUPPORTED
);
6693 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
6694 return mgmt_cmd_status(sk
, hdev
->id
,
6695 MGMT_OP_SET_FAST_CONNECTABLE
,
6696 MGMT_STATUS_INVALID_PARAMS
);
6700 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
6701 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
6705 if (!hdev_is_powered(hdev
)) {
6706 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
6707 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
6708 new_settings(hdev
, sk
);
6712 cmd
= mgmt_pending_new(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
, data
,
6717 err
= hci_cmd_sync_queue(hdev
, write_fast_connectable_sync
, cmd
,
6718 fast_connectable_complete
);
6721 mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
6722 MGMT_STATUS_FAILED
);
6725 mgmt_pending_free(cmd
);
6729 hci_dev_unlock(hdev
);
6734 static void set_bredr_complete(struct hci_dev
*hdev
, void *data
, int err
)
6736 struct mgmt_pending_cmd
*cmd
= data
;
6738 bt_dev_dbg(hdev
, "err %d", err
);
6741 u8 mgmt_err
= mgmt_status(err
);
6743 /* We need to restore the flag if related HCI commands
6746 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
6748 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
6750 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
6751 new_settings(hdev
, cmd
->sk
);
6754 mgmt_pending_free(cmd
);
6757 static int set_bredr_sync(struct hci_dev
*hdev
, void *data
)
6761 status
= hci_write_fast_connectable_sync(hdev
, false);
6764 status
= hci_update_scan_sync(hdev
);
6766 /* Since only the advertising data flags will change, there
6767 * is no need to update the scan response data.
6770 status
= hci_update_adv_data_sync(hdev
, hdev
->cur_adv_instance
);
6775 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
6777 struct mgmt_mode
*cp
= data
;
6778 struct mgmt_pending_cmd
*cmd
;
6781 bt_dev_dbg(hdev
, "sock %p", sk
);
6783 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
6784 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
6785 MGMT_STATUS_NOT_SUPPORTED
);
6787 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
6788 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
6789 MGMT_STATUS_REJECTED
);
6791 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
6792 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
6793 MGMT_STATUS_INVALID_PARAMS
);
6797 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
6798 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
6802 if (!hdev_is_powered(hdev
)) {
6804 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
6805 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
6806 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
6807 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
6810 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
6812 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
6816 err
= new_settings(hdev
, sk
);
6820 /* Reject disabling when powered on */
6822 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
6823 MGMT_STATUS_REJECTED
);
6826 /* When configuring a dual-mode controller to operate
6827 * with LE only and using a static address, then switching
6828 * BR/EDR back on is not allowed.
6830 * Dual-mode controllers shall operate with the public
6831 * address as its identity address for BR/EDR and LE. So
6832 * reject the attempt to create an invalid configuration.
6834 * The same restrictions applies when secure connections
6835 * has been enabled. For BR/EDR this is a controller feature
6836 * while for LE it is a host stack feature. This means that
6837 * switching BR/EDR back on when secure connections has been
6838 * enabled is not a supported transaction.
6840 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
6841 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
6842 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
6843 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
6844 MGMT_STATUS_REJECTED
);
6849 cmd
= mgmt_pending_new(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
6853 err
= hci_cmd_sync_queue(hdev
, set_bredr_sync
, cmd
,
6854 set_bredr_complete
);
6857 mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
6858 MGMT_STATUS_FAILED
);
6860 mgmt_pending_free(cmd
);
6865 /* We need to flip the bit already here so that
6866 * hci_req_update_adv_data generates the correct flags.
6868 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
6871 hci_dev_unlock(hdev
);
6875 static void set_secure_conn_complete(struct hci_dev
*hdev
, void *data
, int err
)
6877 struct mgmt_pending_cmd
*cmd
= data
;
6878 struct mgmt_mode
*cp
;
6880 bt_dev_dbg(hdev
, "err %d", err
);
6883 u8 mgmt_err
= mgmt_status(err
);
6885 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
6893 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
6894 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
6897 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
6898 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
6901 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
6902 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
6906 send_settings_rsp(cmd
->sk
, cmd
->opcode
, hdev
);
6907 new_settings(hdev
, cmd
->sk
);
6910 mgmt_pending_free(cmd
);
6913 static int set_secure_conn_sync(struct hci_dev
*hdev
, void *data
)
6915 struct mgmt_pending_cmd
*cmd
= data
;
6916 struct mgmt_mode
*cp
= cmd
->param
;
6919 /* Force write of val */
6920 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
6922 return hci_write_sc_support_sync(hdev
, val
);
6925 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
6926 void *data
, u16 len
)
6928 struct mgmt_mode
*cp
= data
;
6929 struct mgmt_pending_cmd
*cmd
;
6933 bt_dev_dbg(hdev
, "sock %p", sk
);
6935 if (!lmp_sc_capable(hdev
) &&
6936 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
6937 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
6938 MGMT_STATUS_NOT_SUPPORTED
);
6940 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
6941 lmp_sc_capable(hdev
) &&
6942 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
6943 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
6944 MGMT_STATUS_REJECTED
);
6946 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
6947 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
6948 MGMT_STATUS_INVALID_PARAMS
);
6952 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
6953 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
6957 changed
= !hci_dev_test_and_set_flag(hdev
,
6959 if (cp
->val
== 0x02)
6960 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
6962 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
6964 changed
= hci_dev_test_and_clear_flag(hdev
,
6966 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
6969 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
6974 err
= new_settings(hdev
, sk
);
6981 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
6982 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
6983 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
6987 cmd
= mgmt_pending_new(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
6991 err
= hci_cmd_sync_queue(hdev
, set_secure_conn_sync
, cmd
,
6992 set_secure_conn_complete
);
6995 mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
6996 MGMT_STATUS_FAILED
);
6998 mgmt_pending_free(cmd
);
7002 hci_dev_unlock(hdev
);
7006 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
7007 void *data
, u16 len
)
7009 struct mgmt_mode
*cp
= data
;
7010 bool changed
, use_changed
;
7013 bt_dev_dbg(hdev
, "sock %p", sk
);
7015 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
7016 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
7017 MGMT_STATUS_INVALID_PARAMS
);
7022 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
7024 changed
= hci_dev_test_and_clear_flag(hdev
,
7025 HCI_KEEP_DEBUG_KEYS
);
7027 if (cp
->val
== 0x02)
7028 use_changed
= !hci_dev_test_and_set_flag(hdev
,
7029 HCI_USE_DEBUG_KEYS
);
7031 use_changed
= hci_dev_test_and_clear_flag(hdev
,
7032 HCI_USE_DEBUG_KEYS
);
7034 if (hdev_is_powered(hdev
) && use_changed
&&
7035 hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
7036 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
7037 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
7038 sizeof(mode
), &mode
);
7041 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
7046 err
= new_settings(hdev
, sk
);
7049 hci_dev_unlock(hdev
);
7053 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
7056 struct mgmt_cp_set_privacy
*cp
= cp_data
;
7060 bt_dev_dbg(hdev
, "sock %p", sk
);
7062 if (!lmp_le_capable(hdev
))
7063 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
7064 MGMT_STATUS_NOT_SUPPORTED
);
7066 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01 && cp
->privacy
!= 0x02)
7067 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
7068 MGMT_STATUS_INVALID_PARAMS
);
7070 if (hdev_is_powered(hdev
))
7071 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
7072 MGMT_STATUS_REJECTED
);
7076 /* If user space supports this command it is also expected to
7077 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7079 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
7082 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_PRIVACY
);
7083 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
7084 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
7085 hci_adv_instances_set_rpa_expired(hdev
, true);
7086 if (cp
->privacy
== 0x02)
7087 hci_dev_set_flag(hdev
, HCI_LIMITED_PRIVACY
);
7089 hci_dev_clear_flag(hdev
, HCI_LIMITED_PRIVACY
);
7091 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_PRIVACY
);
7092 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
7093 hci_dev_clear_flag(hdev
, HCI_RPA_EXPIRED
);
7094 hci_adv_instances_set_rpa_expired(hdev
, false);
7095 hci_dev_clear_flag(hdev
, HCI_LIMITED_PRIVACY
);
7098 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
7103 err
= new_settings(hdev
, sk
);
7106 hci_dev_unlock(hdev
);
7110 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
7112 switch (irk
->addr
.type
) {
7113 case BDADDR_LE_PUBLIC
:
7116 case BDADDR_LE_RANDOM
:
7117 /* Two most significant bits shall be set */
7118 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
7126 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
7129 struct mgmt_cp_load_irks
*cp
= cp_data
;
7130 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
7131 sizeof(struct mgmt_irk_info
));
7132 u16 irk_count
, expected_len
;
7135 bt_dev_dbg(hdev
, "sock %p", sk
);
7137 if (!lmp_le_capable(hdev
))
7138 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
7139 MGMT_STATUS_NOT_SUPPORTED
);
7141 irk_count
= __le16_to_cpu(cp
->irk_count
);
7142 if (irk_count
> max_irk_count
) {
7143 bt_dev_err(hdev
, "load_irks: too big irk_count value %u",
7145 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
7146 MGMT_STATUS_INVALID_PARAMS
);
7149 expected_len
= struct_size(cp
, irks
, irk_count
);
7150 if (expected_len
!= len
) {
7151 bt_dev_err(hdev
, "load_irks: expected %u bytes, got %u bytes",
7153 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
7154 MGMT_STATUS_INVALID_PARAMS
);
7157 bt_dev_dbg(hdev
, "irk_count %u", irk_count
);
7159 for (i
= 0; i
< irk_count
; i
++) {
7160 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
7162 if (!irk_is_valid(key
))
7163 return mgmt_cmd_status(sk
, hdev
->id
,
7165 MGMT_STATUS_INVALID_PARAMS
);
7170 hci_smp_irks_clear(hdev
);
7172 for (i
= 0; i
< irk_count
; i
++) {
7173 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
7175 if (hci_is_blocked_key(hdev
,
7176 HCI_BLOCKED_KEY_TYPE_IRK
,
7178 bt_dev_warn(hdev
, "Skipping blocked IRK for %pMR",
7183 hci_add_irk(hdev
, &irk
->addr
.bdaddr
,
7184 le_addr_type(irk
->addr
.type
), irk
->val
,
7188 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
7190 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
7192 hci_dev_unlock(hdev
);
7197 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
7199 if (key
->initiator
!= 0x00 && key
->initiator
!= 0x01)
7202 switch (key
->addr
.type
) {
7203 case BDADDR_LE_PUBLIC
:
7206 case BDADDR_LE_RANDOM
:
7207 /* Two most significant bits shall be set */
7208 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
7216 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
7217 void *cp_data
, u16 len
)
7219 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
7220 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
7221 sizeof(struct mgmt_ltk_info
));
7222 u16 key_count
, expected_len
;
7225 bt_dev_dbg(hdev
, "sock %p", sk
);
7227 if (!lmp_le_capable(hdev
))
7228 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
7229 MGMT_STATUS_NOT_SUPPORTED
);
7231 key_count
= __le16_to_cpu(cp
->key_count
);
7232 if (key_count
> max_key_count
) {
7233 bt_dev_err(hdev
, "load_ltks: too big key_count value %u",
7235 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
7236 MGMT_STATUS_INVALID_PARAMS
);
7239 expected_len
= struct_size(cp
, keys
, key_count
);
7240 if (expected_len
!= len
) {
7241 bt_dev_err(hdev
, "load_keys: expected %u bytes, got %u bytes",
7243 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
7244 MGMT_STATUS_INVALID_PARAMS
);
7247 bt_dev_dbg(hdev
, "key_count %u", key_count
);
7251 hci_smp_ltks_clear(hdev
);
7253 for (i
= 0; i
< key_count
; i
++) {
7254 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
7255 u8 type
, authenticated
;
7257 if (hci_is_blocked_key(hdev
,
7258 HCI_BLOCKED_KEY_TYPE_LTK
,
7260 bt_dev_warn(hdev
, "Skipping blocked LTK for %pMR",
7265 if (!ltk_is_valid(key
)) {
7266 bt_dev_warn(hdev
, "Invalid LTK for %pMR",
7271 switch (key
->type
) {
7272 case MGMT_LTK_UNAUTHENTICATED
:
7273 authenticated
= 0x00;
7274 type
= key
->initiator
? SMP_LTK
: SMP_LTK_RESPONDER
;
7276 case MGMT_LTK_AUTHENTICATED
:
7277 authenticated
= 0x01;
7278 type
= key
->initiator
? SMP_LTK
: SMP_LTK_RESPONDER
;
7280 case MGMT_LTK_P256_UNAUTH
:
7281 authenticated
= 0x00;
7282 type
= SMP_LTK_P256
;
7284 case MGMT_LTK_P256_AUTH
:
7285 authenticated
= 0x01;
7286 type
= SMP_LTK_P256
;
7288 case MGMT_LTK_P256_DEBUG
:
7289 authenticated
= 0x00;
7290 type
= SMP_LTK_P256_DEBUG
;
7296 hci_add_ltk(hdev
, &key
->addr
.bdaddr
,
7297 le_addr_type(key
->addr
.type
), type
, authenticated
,
7298 key
->val
, key
->enc_size
, key
->ediv
, key
->rand
);
7301 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
7304 hci_dev_unlock(hdev
);
7309 static void get_conn_info_complete(struct hci_dev
*hdev
, void *data
, int err
)
7311 struct mgmt_pending_cmd
*cmd
= data
;
7312 struct hci_conn
*conn
= cmd
->user_data
;
7313 struct mgmt_cp_get_conn_info
*cp
= cmd
->param
;
7314 struct mgmt_rp_get_conn_info rp
;
7317 bt_dev_dbg(hdev
, "err %d", err
);
7319 memcpy(&rp
.addr
, &cp
->addr
, sizeof(rp
.addr
));
7321 status
= mgmt_status(err
);
7322 if (status
== MGMT_STATUS_SUCCESS
) {
7323 rp
.rssi
= conn
->rssi
;
7324 rp
.tx_power
= conn
->tx_power
;
7325 rp
.max_tx_power
= conn
->max_tx_power
;
7327 rp
.rssi
= HCI_RSSI_INVALID
;
7328 rp
.tx_power
= HCI_TX_POWER_INVALID
;
7329 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
7332 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
, status
,
7335 mgmt_pending_free(cmd
);
7338 static int get_conn_info_sync(struct hci_dev
*hdev
, void *data
)
7340 struct mgmt_pending_cmd
*cmd
= data
;
7341 struct mgmt_cp_get_conn_info
*cp
= cmd
->param
;
7342 struct hci_conn
*conn
;
7346 /* Make sure we are still connected */
7347 if (cp
->addr
.type
== BDADDR_BREDR
)
7348 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
7351 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
7353 if (!conn
|| conn
->state
!= BT_CONNECTED
)
7354 return MGMT_STATUS_NOT_CONNECTED
;
7356 cmd
->user_data
= conn
;
7357 handle
= cpu_to_le16(conn
->handle
);
7359 /* Refresh RSSI each time */
7360 err
= hci_read_rssi_sync(hdev
, handle
);
7362 /* For LE links TX power does not change thus we don't need to
7363 * query for it once value is known.
7365 if (!err
&& (!bdaddr_type_is_le(cp
->addr
.type
) ||
7366 conn
->tx_power
== HCI_TX_POWER_INVALID
))
7367 err
= hci_read_tx_power_sync(hdev
, handle
, 0x00);
7369 /* Max TX power needs to be read only once per connection */
7370 if (!err
&& conn
->max_tx_power
== HCI_TX_POWER_INVALID
)
7371 err
= hci_read_tx_power_sync(hdev
, handle
, 0x01);
7376 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
7379 struct mgmt_cp_get_conn_info
*cp
= data
;
7380 struct mgmt_rp_get_conn_info rp
;
7381 struct hci_conn
*conn
;
7382 unsigned long conn_info_age
;
7385 bt_dev_dbg(hdev
, "sock %p", sk
);
7387 memset(&rp
, 0, sizeof(rp
));
7388 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
7389 rp
.addr
.type
= cp
->addr
.type
;
7391 if (!bdaddr_type_is_valid(cp
->addr
.type
))
7392 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
7393 MGMT_STATUS_INVALID_PARAMS
,
7398 if (!hdev_is_powered(hdev
)) {
7399 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
7400 MGMT_STATUS_NOT_POWERED
, &rp
,
7405 if (cp
->addr
.type
== BDADDR_BREDR
)
7406 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
7409 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
7411 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
7412 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
7413 MGMT_STATUS_NOT_CONNECTED
, &rp
,
7418 /* To avoid client trying to guess when to poll again for information we
7419 * calculate conn info age as random value between min/max set in hdev.
7421 conn_info_age
= get_random_u32_inclusive(hdev
->conn_info_min_age
,
7422 hdev
->conn_info_max_age
- 1);
7424 /* Query controller to refresh cached values if they are too old or were
7427 if (time_after(jiffies
, conn
->conn_info_timestamp
+
7428 msecs_to_jiffies(conn_info_age
)) ||
7429 !conn
->conn_info_timestamp
) {
7430 struct mgmt_pending_cmd
*cmd
;
7432 cmd
= mgmt_pending_new(sk
, MGMT_OP_GET_CONN_INFO
, hdev
, data
,
7437 err
= hci_cmd_sync_queue(hdev
, get_conn_info_sync
,
7438 cmd
, get_conn_info_complete
);
7442 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
7443 MGMT_STATUS_FAILED
, &rp
, sizeof(rp
));
7446 mgmt_pending_free(cmd
);
7451 conn
->conn_info_timestamp
= jiffies
;
7453 /* Cache is valid, just reply with values cached in hci_conn */
7454 rp
.rssi
= conn
->rssi
;
7455 rp
.tx_power
= conn
->tx_power
;
7456 rp
.max_tx_power
= conn
->max_tx_power
;
7458 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
7459 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7463 hci_dev_unlock(hdev
);
7467 static void get_clock_info_complete(struct hci_dev
*hdev
, void *data
, int err
)
7469 struct mgmt_pending_cmd
*cmd
= data
;
7470 struct mgmt_cp_get_clock_info
*cp
= cmd
->param
;
7471 struct mgmt_rp_get_clock_info rp
;
7472 struct hci_conn
*conn
= cmd
->user_data
;
7473 u8 status
= mgmt_status(err
);
7475 bt_dev_dbg(hdev
, "err %d", err
);
7477 memset(&rp
, 0, sizeof(rp
));
7478 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
7479 rp
.addr
.type
= cp
->addr
.type
;
7484 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
7487 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
7488 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
7492 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
,
7495 mgmt_pending_free(cmd
);
7498 static int get_clock_info_sync(struct hci_dev
*hdev
, void *data
)
7500 struct mgmt_pending_cmd
*cmd
= data
;
7501 struct mgmt_cp_get_clock_info
*cp
= cmd
->param
;
7502 struct hci_cp_read_clock hci_cp
;
7503 struct hci_conn
*conn
;
7505 memset(&hci_cp
, 0, sizeof(hci_cp
));
7506 hci_read_clock_sync(hdev
, &hci_cp
);
7508 /* Make sure connection still exists */
7509 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
7510 if (!conn
|| conn
->state
!= BT_CONNECTED
)
7511 return MGMT_STATUS_NOT_CONNECTED
;
7513 cmd
->user_data
= conn
;
7514 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
7515 hci_cp
.which
= 0x01; /* Piconet clock */
7517 return hci_read_clock_sync(hdev
, &hci_cp
);
7520 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
7523 struct mgmt_cp_get_clock_info
*cp
= data
;
7524 struct mgmt_rp_get_clock_info rp
;
7525 struct mgmt_pending_cmd
*cmd
;
7526 struct hci_conn
*conn
;
7529 bt_dev_dbg(hdev
, "sock %p", sk
);
7531 memset(&rp
, 0, sizeof(rp
));
7532 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
7533 rp
.addr
.type
= cp
->addr
.type
;
7535 if (cp
->addr
.type
!= BDADDR_BREDR
)
7536 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
7537 MGMT_STATUS_INVALID_PARAMS
,
7542 if (!hdev_is_powered(hdev
)) {
7543 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
7544 MGMT_STATUS_NOT_POWERED
, &rp
,
7549 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
7550 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
7552 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
7553 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7554 MGMT_OP_GET_CLOCK_INFO
,
7555 MGMT_STATUS_NOT_CONNECTED
,
7563 cmd
= mgmt_pending_new(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
7567 err
= hci_cmd_sync_queue(hdev
, get_clock_info_sync
, cmd
,
7568 get_clock_info_complete
);
7571 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
7572 MGMT_STATUS_FAILED
, &rp
, sizeof(rp
));
7575 mgmt_pending_free(cmd
);
7580 hci_dev_unlock(hdev
);
7584 static bool is_connected(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 type
)
7586 struct hci_conn
*conn
;
7588 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, addr
);
7592 if (conn
->dst_type
!= type
)
7595 if (conn
->state
!= BT_CONNECTED
)
7601 /* This function requires the caller holds hdev->lock */
7602 static int hci_conn_params_set(struct hci_dev
*hdev
, bdaddr_t
*addr
,
7603 u8 addr_type
, u8 auto_connect
)
7605 struct hci_conn_params
*params
;
7607 params
= hci_conn_params_add(hdev
, addr
, addr_type
);
7611 if (params
->auto_connect
== auto_connect
)
7614 hci_pend_le_list_del_init(params
);
7616 switch (auto_connect
) {
7617 case HCI_AUTO_CONN_DISABLED
:
7618 case HCI_AUTO_CONN_LINK_LOSS
:
7619 /* If auto connect is being disabled when we're trying to
7620 * connect to device, keep connecting.
7622 if (params
->explicit_connect
)
7623 hci_pend_le_list_add(params
, &hdev
->pend_le_conns
);
7625 case HCI_AUTO_CONN_REPORT
:
7626 if (params
->explicit_connect
)
7627 hci_pend_le_list_add(params
, &hdev
->pend_le_conns
);
7629 hci_pend_le_list_add(params
, &hdev
->pend_le_reports
);
7631 case HCI_AUTO_CONN_DIRECT
:
7632 case HCI_AUTO_CONN_ALWAYS
:
7633 if (!is_connected(hdev
, addr
, addr_type
))
7634 hci_pend_le_list_add(params
, &hdev
->pend_le_conns
);
7638 params
->auto_connect
= auto_connect
;
7640 bt_dev_dbg(hdev
, "addr %pMR (type %u) auto_connect %u",
7641 addr
, addr_type
, auto_connect
);
7646 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
7647 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
7649 struct mgmt_ev_device_added ev
;
7651 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7652 ev
.addr
.type
= type
;
7655 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
/* hci_cmd_sync work: refresh passive scanning after a device was added. */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7663 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
7664 void *data
, u16 len
)
7666 struct mgmt_cp_add_device
*cp
= data
;
7667 u8 auto_conn
, addr_type
;
7668 struct hci_conn_params
*params
;
7670 u32 current_flags
= 0;
7671 u32 supported_flags
;
7673 bt_dev_dbg(hdev
, "sock %p", sk
);
7675 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
7676 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
7677 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
7678 MGMT_STATUS_INVALID_PARAMS
,
7679 &cp
->addr
, sizeof(cp
->addr
));
7681 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
7682 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
7683 MGMT_STATUS_INVALID_PARAMS
,
7684 &cp
->addr
, sizeof(cp
->addr
));
7688 if (cp
->addr
.type
== BDADDR_BREDR
) {
7689 /* Only incoming connections action is supported for now */
7690 if (cp
->action
!= 0x01) {
7691 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7693 MGMT_STATUS_INVALID_PARAMS
,
7694 &cp
->addr
, sizeof(cp
->addr
));
7698 err
= hci_bdaddr_list_add_with_flags(&hdev
->accept_list
,
7704 hci_update_scan(hdev
);
7709 addr_type
= le_addr_type(cp
->addr
.type
);
7711 if (cp
->action
== 0x02)
7712 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
7713 else if (cp
->action
== 0x01)
7714 auto_conn
= HCI_AUTO_CONN_DIRECT
;
7716 auto_conn
= HCI_AUTO_CONN_REPORT
;
7718 /* Kernel internally uses conn_params with resolvable private
7719 * address, but Add Device allows only identity addresses.
7720 * Make sure it is enforced before calling
7721 * hci_conn_params_lookup.
7723 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
7724 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
7725 MGMT_STATUS_INVALID_PARAMS
,
7726 &cp
->addr
, sizeof(cp
->addr
));
7730 /* If the connection parameters don't exist for this device,
7731 * they will be created and configured with defaults.
7733 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
7735 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
7736 MGMT_STATUS_FAILED
, &cp
->addr
,
7740 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
7743 current_flags
= params
->flags
;
7746 err
= hci_cmd_sync_queue(hdev
, add_device_sync
, NULL
, NULL
);
7751 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
7752 supported_flags
= hdev
->conn_flags
;
7753 device_flags_changed(NULL
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
,
7754 supported_flags
, current_flags
);
7756 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
7757 MGMT_STATUS_SUCCESS
, &cp
->addr
,
7761 hci_dev_unlock(hdev
);
7765 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
7766 bdaddr_t
*bdaddr
, u8 type
)
7768 struct mgmt_ev_device_removed ev
;
7770 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7771 ev
.addr
.type
= type
;
7773 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
/* hci_cmd_sync work: refresh passive scanning after a device was removed. */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7781 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
7782 void *data
, u16 len
)
7784 struct mgmt_cp_remove_device
*cp
= data
;
7787 bt_dev_dbg(hdev
, "sock %p", sk
);
7791 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
7792 struct hci_conn_params
*params
;
7795 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
7796 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7797 MGMT_OP_REMOVE_DEVICE
,
7798 MGMT_STATUS_INVALID_PARAMS
,
7799 &cp
->addr
, sizeof(cp
->addr
));
7803 if (cp
->addr
.type
== BDADDR_BREDR
) {
7804 err
= hci_bdaddr_list_del(&hdev
->accept_list
,
7808 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7809 MGMT_OP_REMOVE_DEVICE
,
7810 MGMT_STATUS_INVALID_PARAMS
,
7816 hci_update_scan(hdev
);
7818 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
7823 addr_type
= le_addr_type(cp
->addr
.type
);
7825 /* Kernel internally uses conn_params with resolvable private
7826 * address, but Remove Device allows only identity addresses.
7827 * Make sure it is enforced before calling
7828 * hci_conn_params_lookup.
7830 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
7831 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7832 MGMT_OP_REMOVE_DEVICE
,
7833 MGMT_STATUS_INVALID_PARAMS
,
7834 &cp
->addr
, sizeof(cp
->addr
));
7838 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
7841 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7842 MGMT_OP_REMOVE_DEVICE
,
7843 MGMT_STATUS_INVALID_PARAMS
,
7844 &cp
->addr
, sizeof(cp
->addr
));
7848 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
||
7849 params
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
) {
7850 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7851 MGMT_OP_REMOVE_DEVICE
,
7852 MGMT_STATUS_INVALID_PARAMS
,
7853 &cp
->addr
, sizeof(cp
->addr
));
7857 hci_conn_params_free(params
);
7859 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
7861 struct hci_conn_params
*p
, *tmp
;
7862 struct bdaddr_list
*b
, *btmp
;
7864 if (cp
->addr
.type
) {
7865 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7866 MGMT_OP_REMOVE_DEVICE
,
7867 MGMT_STATUS_INVALID_PARAMS
,
7868 &cp
->addr
, sizeof(cp
->addr
));
7872 list_for_each_entry_safe(b
, btmp
, &hdev
->accept_list
, list
) {
7873 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
7878 hci_update_scan(hdev
);
7880 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
7881 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
7883 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
7884 if (p
->explicit_connect
) {
7885 p
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
7888 hci_conn_params_free(p
);
7891 bt_dev_dbg(hdev
, "All LE connection parameters were removed");
7894 hci_cmd_sync_queue(hdev
, remove_device_sync
, NULL
, NULL
);
7897 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
7898 MGMT_STATUS_SUCCESS
, &cp
->addr
,
7901 hci_dev_unlock(hdev
);
7905 static int conn_update_sync(struct hci_dev
*hdev
, void *data
)
7907 struct hci_conn_params
*params
= data
;
7908 struct hci_conn
*conn
;
7910 conn
= hci_conn_hash_lookup_le(hdev
, ¶ms
->addr
, params
->addr_type
);
7914 return hci_le_conn_update_sync(hdev
, conn
, params
);
7917 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
7920 struct mgmt_cp_load_conn_param
*cp
= data
;
7921 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
7922 sizeof(struct mgmt_conn_param
));
7923 u16 param_count
, expected_len
;
7926 if (!lmp_le_capable(hdev
))
7927 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
7928 MGMT_STATUS_NOT_SUPPORTED
);
7930 param_count
= __le16_to_cpu(cp
->param_count
);
7931 if (param_count
> max_param_count
) {
7932 bt_dev_err(hdev
, "load_conn_param: too big param_count value %u",
7934 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
7935 MGMT_STATUS_INVALID_PARAMS
);
7938 expected_len
= struct_size(cp
, params
, param_count
);
7939 if (expected_len
!= len
) {
7940 bt_dev_err(hdev
, "load_conn_param: expected %u bytes, got %u bytes",
7942 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
7943 MGMT_STATUS_INVALID_PARAMS
);
7946 bt_dev_dbg(hdev
, "param_count %u", param_count
);
7950 if (param_count
> 1)
7951 hci_conn_params_clear_disabled(hdev
);
7953 for (i
= 0; i
< param_count
; i
++) {
7954 struct mgmt_conn_param
*param
= &cp
->params
[i
];
7955 struct hci_conn_params
*hci_param
;
7956 u16 min
, max
, latency
, timeout
;
7957 bool update
= false;
7960 bt_dev_dbg(hdev
, "Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
7963 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
7964 addr_type
= ADDR_LE_DEV_PUBLIC
;
7965 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
7966 addr_type
= ADDR_LE_DEV_RANDOM
;
7968 bt_dev_err(hdev
, "ignoring invalid connection parameters");
7972 min
= le16_to_cpu(param
->min_interval
);
7973 max
= le16_to_cpu(param
->max_interval
);
7974 latency
= le16_to_cpu(param
->latency
);
7975 timeout
= le16_to_cpu(param
->timeout
);
7977 bt_dev_dbg(hdev
, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7978 min
, max
, latency
, timeout
);
7980 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
7981 bt_dev_err(hdev
, "ignoring invalid connection parameters");
7985 /* Detect when the loading is for an existing parameter then
7986 * attempt to trigger the connection update procedure.
7988 if (!i
&& param_count
== 1) {
7989 hci_param
= hci_conn_params_lookup(hdev
,
7990 ¶m
->addr
.bdaddr
,
7995 hci_conn_params_clear_disabled(hdev
);
7998 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
8001 bt_dev_err(hdev
, "failed to add connection parameters");
8005 hci_param
->conn_min_interval
= min
;
8006 hci_param
->conn_max_interval
= max
;
8007 hci_param
->conn_latency
= latency
;
8008 hci_param
->supervision_timeout
= timeout
;
8010 /* Check if we need to trigger a connection update */
8012 struct hci_conn
*conn
;
8014 /* Lookup for existing connection as central and check
8015 * if parameters match and if they don't then trigger
8016 * a connection update.
8018 conn
= hci_conn_hash_lookup_le(hdev
, &hci_param
->addr
,
8020 if (conn
&& conn
->role
== HCI_ROLE_MASTER
&&
8021 (conn
->le_conn_min_interval
!= min
||
8022 conn
->le_conn_max_interval
!= max
||
8023 conn
->le_conn_latency
!= latency
||
8024 conn
->le_supv_timeout
!= timeout
))
8025 hci_cmd_sync_queue(hdev
, conn_update_sync
,
8030 hci_dev_unlock(hdev
);
8032 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0,
8036 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
8037 void *data
, u16 len
)
8039 struct mgmt_cp_set_external_config
*cp
= data
;
8043 bt_dev_dbg(hdev
, "sock %p", sk
);
8045 if (hdev_is_powered(hdev
))
8046 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
8047 MGMT_STATUS_REJECTED
);
8049 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
8050 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
8051 MGMT_STATUS_INVALID_PARAMS
);
8053 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
8054 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
8055 MGMT_STATUS_NOT_SUPPORTED
);
8060 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_EXT_CONFIGURED
);
8062 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_EXT_CONFIGURED
);
8064 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
8071 err
= new_options(hdev
, sk
);
8073 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) == is_configured(hdev
)) {
8074 mgmt_index_removed(hdev
);
8076 if (hci_dev_test_and_change_flag(hdev
, HCI_UNCONFIGURED
)) {
8077 hci_dev_set_flag(hdev
, HCI_CONFIG
);
8078 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
8080 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
8082 set_bit(HCI_RAW
, &hdev
->flags
);
8083 mgmt_index_added(hdev
);
8088 hci_dev_unlock(hdev
);
8092 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
8093 void *data
, u16 len
)
8095 struct mgmt_cp_set_public_address
*cp
= data
;
8099 bt_dev_dbg(hdev
, "sock %p", sk
);
8101 if (hdev_is_powered(hdev
))
8102 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
8103 MGMT_STATUS_REJECTED
);
8105 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
8106 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
8107 MGMT_STATUS_INVALID_PARAMS
);
8109 if (!hdev
->set_bdaddr
)
8110 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
8111 MGMT_STATUS_NOT_SUPPORTED
);
8115 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
8116 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
8118 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
8125 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
8126 err
= new_options(hdev
, sk
);
8128 if (is_configured(hdev
)) {
8129 mgmt_index_removed(hdev
);
8131 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
8133 hci_dev_set_flag(hdev
, HCI_CONFIG
);
8134 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
8136 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
8140 hci_dev_unlock(hdev
);
8144 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, void *data
,
8147 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
8148 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
8149 u8
*h192
, *r192
, *h256
, *r256
;
8150 struct mgmt_pending_cmd
*cmd
= data
;
8151 struct sk_buff
*skb
= cmd
->skb
;
8152 u8 status
= mgmt_status(err
);
8155 if (err
== -ECANCELED
||
8156 cmd
!= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
))
8161 status
= MGMT_STATUS_FAILED
;
8162 else if (IS_ERR(skb
))
8163 status
= mgmt_status(PTR_ERR(skb
));
8165 status
= mgmt_status(skb
->data
[0]);
8168 bt_dev_dbg(hdev
, "status %u", status
);
8170 mgmt_cp
= cmd
->param
;
8173 status
= mgmt_status(status
);
8180 } else if (!bredr_sc_enabled(hdev
)) {
8181 struct hci_rp_read_local_oob_data
*rp
;
8183 if (skb
->len
!= sizeof(*rp
)) {
8184 status
= MGMT_STATUS_FAILED
;
8187 status
= MGMT_STATUS_SUCCESS
;
8188 rp
= (void *)skb
->data
;
8190 eir_len
= 5 + 18 + 18;
8197 struct hci_rp_read_local_oob_ext_data
*rp
;
8199 if (skb
->len
!= sizeof(*rp
)) {
8200 status
= MGMT_STATUS_FAILED
;
8203 status
= MGMT_STATUS_SUCCESS
;
8204 rp
= (void *)skb
->data
;
8206 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
8207 eir_len
= 5 + 18 + 18;
8211 eir_len
= 5 + 18 + 18 + 18 + 18;
8221 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
8228 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
8229 hdev
->dev_class
, 3);
8232 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
8233 EIR_SSP_HASH_C192
, h192
, 16);
8234 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
8235 EIR_SSP_RAND_R192
, r192
, 16);
8239 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
8240 EIR_SSP_HASH_C256
, h256
, 16);
8241 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
8242 EIR_SSP_RAND_R256
, r256
, 16);
8246 mgmt_rp
->type
= mgmt_cp
->type
;
8247 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
8249 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
8250 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
8251 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
8252 if (err
< 0 || status
)
8255 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
8257 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
8258 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
8259 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
8261 if (skb
&& !IS_ERR(skb
))
8265 mgmt_pending_remove(cmd
);
8268 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
8269 struct mgmt_cp_read_local_oob_ext_data
*cp
)
8271 struct mgmt_pending_cmd
*cmd
;
8274 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
8279 err
= hci_cmd_sync_queue(hdev
, read_local_oob_data_sync
, cmd
,
8280 read_local_oob_ext_data_complete
);
8283 mgmt_pending_remove(cmd
);
8290 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
8291 void *data
, u16 data_len
)
8293 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
8294 struct mgmt_rp_read_local_oob_ext_data
*rp
;
8297 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
8300 bt_dev_dbg(hdev
, "sock %p", sk
);
8302 if (hdev_is_powered(hdev
)) {
8304 case BIT(BDADDR_BREDR
):
8305 status
= mgmt_bredr_support(hdev
);
8311 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
8312 status
= mgmt_le_support(hdev
);
8316 eir_len
= 9 + 3 + 18 + 18 + 3;
8319 status
= MGMT_STATUS_INVALID_PARAMS
;
8324 status
= MGMT_STATUS_NOT_POWERED
;
8328 rp_len
= sizeof(*rp
) + eir_len
;
8329 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
8333 if (!status
&& !lmp_ssp_capable(hdev
)) {
8334 status
= MGMT_STATUS_NOT_SUPPORTED
;
8345 case BIT(BDADDR_BREDR
):
8346 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
8347 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
8348 hci_dev_unlock(hdev
);
8352 status
= MGMT_STATUS_FAILED
;
8355 eir_len
= eir_append_data(rp
->eir
, eir_len
,
8357 hdev
->dev_class
, 3);
8360 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
8361 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
8362 smp_generate_oob(hdev
, hash
, rand
) < 0) {
8363 hci_dev_unlock(hdev
);
8364 status
= MGMT_STATUS_FAILED
;
8368 /* This should return the active RPA, but since the RPA
8369 * is only programmed on demand, it is really hard to fill
8370 * this in at the moment. For now disallow retrieving
8371 * local out-of-band data when privacy is in use.
8373 * Returning the identity address will not help here since
8374 * pairing happens before the identity resolving key is
8375 * known and thus the connection establishment happens
8376 * based on the RPA and not the identity address.
8378 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
8379 hci_dev_unlock(hdev
);
8380 status
= MGMT_STATUS_REJECTED
;
8384 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
8385 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
8386 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
8387 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
8388 memcpy(addr
, &hdev
->static_addr
, 6);
8391 memcpy(addr
, &hdev
->bdaddr
, 6);
8395 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
8396 addr
, sizeof(addr
));
8398 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
8403 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
8404 &role
, sizeof(role
));
8406 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
8407 eir_len
= eir_append_data(rp
->eir
, eir_len
,
8409 hash
, sizeof(hash
));
8411 eir_len
= eir_append_data(rp
->eir
, eir_len
,
8413 rand
, sizeof(rand
));
8416 flags
= mgmt_get_adv_discov_flags(hdev
);
8418 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
8419 flags
|= LE_AD_NO_BREDR
;
8421 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
8422 &flags
, sizeof(flags
));
8426 hci_dev_unlock(hdev
);
8428 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
8430 status
= MGMT_STATUS_SUCCESS
;
8433 rp
->type
= cp
->type
;
8434 rp
->eir_len
= cpu_to_le16(eir_len
);
8436 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
8437 status
, rp
, sizeof(*rp
) + eir_len
);
8438 if (err
< 0 || status
)
8441 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
8442 rp
, sizeof(*rp
) + eir_len
,
8443 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
8451 static u32
get_supported_adv_flags(struct hci_dev
*hdev
)
8455 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
8456 flags
|= MGMT_ADV_FLAG_DISCOV
;
8457 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
8458 flags
|= MGMT_ADV_FLAG_MANAGED_FLAGS
;
8459 flags
|= MGMT_ADV_FLAG_APPEARANCE
;
8460 flags
|= MGMT_ADV_FLAG_LOCAL_NAME
;
8461 flags
|= MGMT_ADV_PARAM_DURATION
;
8462 flags
|= MGMT_ADV_PARAM_TIMEOUT
;
8463 flags
|= MGMT_ADV_PARAM_INTERVALS
;
8464 flags
|= MGMT_ADV_PARAM_TX_POWER
;
8465 flags
|= MGMT_ADV_PARAM_SCAN_RSP
;
8467 /* In extended adv TX_POWER returned from Set Adv Param
8468 * will be always valid.
8470 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
|| ext_adv_capable(hdev
))
8471 flags
|= MGMT_ADV_FLAG_TX_POWER
;
8473 if (ext_adv_capable(hdev
)) {
8474 flags
|= MGMT_ADV_FLAG_SEC_1M
;
8475 flags
|= MGMT_ADV_FLAG_HW_OFFLOAD
;
8476 flags
|= MGMT_ADV_FLAG_CAN_SET_TX_POWER
;
8478 if (le_2m_capable(hdev
))
8479 flags
|= MGMT_ADV_FLAG_SEC_2M
;
8481 if (le_coded_capable(hdev
))
8482 flags
|= MGMT_ADV_FLAG_SEC_CODED
;
8488 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
8489 void *data
, u16 data_len
)
8491 struct mgmt_rp_read_adv_features
*rp
;
8494 struct adv_info
*adv_instance
;
8495 u32 supported_flags
;
8498 bt_dev_dbg(hdev
, "sock %p", sk
);
8500 if (!lmp_le_capable(hdev
))
8501 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
8502 MGMT_STATUS_REJECTED
);
8506 rp_len
= sizeof(*rp
) + hdev
->adv_instance_cnt
;
8507 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
8509 hci_dev_unlock(hdev
);
8513 supported_flags
= get_supported_adv_flags(hdev
);
8515 rp
->supported_flags
= cpu_to_le32(supported_flags
);
8516 rp
->max_adv_data_len
= max_adv_len(hdev
);
8517 rp
->max_scan_rsp_len
= max_adv_len(hdev
);
8518 rp
->max_instances
= hdev
->le_num_of_adv_sets
;
8519 rp
->num_instances
= hdev
->adv_instance_cnt
;
8521 instance
= rp
->instance
;
8522 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
8523 /* Only instances 1-le_num_of_adv_sets are externally visible */
8524 if (adv_instance
->instance
<= hdev
->adv_instance_cnt
) {
8525 *instance
= adv_instance
->instance
;
8528 rp
->num_instances
--;
8533 hci_dev_unlock(hdev
);
8535 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
8536 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
8543 static u8
calculate_name_len(struct hci_dev
*hdev
)
8545 u8 buf
[HCI_MAX_SHORT_NAME_LENGTH
+ 2]; /* len + type + name */
8547 return eir_append_local_name(hdev
, buf
, 0);
8550 static u8
tlv_data_max_len(struct hci_dev
*hdev
, u32 adv_flags
,
8553 u8 max_len
= max_adv_len(hdev
);
8556 if (adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
8557 MGMT_ADV_FLAG_LIMITED_DISCOV
|
8558 MGMT_ADV_FLAG_MANAGED_FLAGS
))
8561 if (adv_flags
& MGMT_ADV_FLAG_TX_POWER
)
8564 if (adv_flags
& MGMT_ADV_FLAG_LOCAL_NAME
)
8565 max_len
-= calculate_name_len(hdev
);
8567 if (adv_flags
& (MGMT_ADV_FLAG_APPEARANCE
))
8574 static bool flags_managed(u32 adv_flags
)
8576 return adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
8577 MGMT_ADV_FLAG_LIMITED_DISCOV
|
8578 MGMT_ADV_FLAG_MANAGED_FLAGS
);
8581 static bool tx_power_managed(u32 adv_flags
)
8583 return adv_flags
& MGMT_ADV_FLAG_TX_POWER
;
8586 static bool name_managed(u32 adv_flags
)
8588 return adv_flags
& MGMT_ADV_FLAG_LOCAL_NAME
;
8591 static bool appearance_managed(u32 adv_flags
)
8593 return adv_flags
& MGMT_ADV_FLAG_APPEARANCE
;
8596 static bool tlv_data_is_valid(struct hci_dev
*hdev
, u32 adv_flags
, u8
*data
,
8597 u8 len
, bool is_adv_data
)
8602 max_len
= tlv_data_max_len(hdev
, adv_flags
, is_adv_data
);
8607 /* Make sure that the data is correctly formatted. */
8608 for (i
= 0; i
< len
; i
+= (cur_len
+ 1)) {
8614 if (data
[i
+ 1] == EIR_FLAGS
&&
8615 (!is_adv_data
|| flags_managed(adv_flags
)))
8618 if (data
[i
+ 1] == EIR_TX_POWER
&& tx_power_managed(adv_flags
))
8621 if (data
[i
+ 1] == EIR_NAME_COMPLETE
&& name_managed(adv_flags
))
8624 if (data
[i
+ 1] == EIR_NAME_SHORT
&& name_managed(adv_flags
))
8627 if (data
[i
+ 1] == EIR_APPEARANCE
&&
8628 appearance_managed(adv_flags
))
8631 /* If the current field length would exceed the total data
8632 * length, then it's invalid.
8634 if (i
+ cur_len
>= len
)
8641 static bool requested_adv_flags_are_valid(struct hci_dev
*hdev
, u32 adv_flags
)
8643 u32 supported_flags
, phy_flags
;
8645 /* The current implementation only supports a subset of the specified
8646 * flags. Also need to check mutual exclusiveness of sec flags.
8648 supported_flags
= get_supported_adv_flags(hdev
);
8649 phy_flags
= adv_flags
& MGMT_ADV_FLAG_SEC_MASK
;
8650 if (adv_flags
& ~supported_flags
||
8651 ((phy_flags
&& (phy_flags
^ (phy_flags
& -phy_flags
)))))
8657 static bool adv_busy(struct hci_dev
*hdev
)
8659 return pending_find(MGMT_OP_SET_LE
, hdev
);
8662 static void add_adv_complete(struct hci_dev
*hdev
, struct sock
*sk
, u8 instance
,
8665 struct adv_info
*adv
, *n
;
8667 bt_dev_dbg(hdev
, "err %d", err
);
8671 list_for_each_entry_safe(adv
, n
, &hdev
->adv_instances
, list
) {
8678 adv
->pending
= false;
8682 instance
= adv
->instance
;
8684 if (hdev
->cur_adv_instance
== instance
)
8685 cancel_adv_timeout(hdev
);
8687 hci_remove_adv_instance(hdev
, instance
);
8688 mgmt_advertising_removed(sk
, hdev
, instance
);
8691 hci_dev_unlock(hdev
);
8694 static void add_advertising_complete(struct hci_dev
*hdev
, void *data
, int err
)
8696 struct mgmt_pending_cmd
*cmd
= data
;
8697 struct mgmt_cp_add_advertising
*cp
= cmd
->param
;
8698 struct mgmt_rp_add_advertising rp
;
8700 memset(&rp
, 0, sizeof(rp
));
8702 rp
.instance
= cp
->instance
;
8705 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
8708 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
8709 mgmt_status(err
), &rp
, sizeof(rp
));
8711 add_adv_complete(hdev
, cmd
->sk
, cp
->instance
, err
);
8713 mgmt_pending_free(cmd
);
8716 static int add_advertising_sync(struct hci_dev
*hdev
, void *data
)
8718 struct mgmt_pending_cmd
*cmd
= data
;
8719 struct mgmt_cp_add_advertising
*cp
= cmd
->param
;
8721 return hci_schedule_adv_instance_sync(hdev
, cp
->instance
, true);
8724 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
8725 void *data
, u16 data_len
)
8727 struct mgmt_cp_add_advertising
*cp
= data
;
8728 struct mgmt_rp_add_advertising rp
;
8731 u16 timeout
, duration
;
8732 unsigned int prev_instance_cnt
;
8733 u8 schedule_instance
= 0;
8734 struct adv_info
*adv
, *next_instance
;
8736 struct mgmt_pending_cmd
*cmd
;
8738 bt_dev_dbg(hdev
, "sock %p", sk
);
8740 status
= mgmt_le_support(hdev
);
8742 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8745 if (cp
->instance
< 1 || cp
->instance
> hdev
->le_num_of_adv_sets
)
8746 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8747 MGMT_STATUS_INVALID_PARAMS
);
8749 if (data_len
!= sizeof(*cp
) + cp
->adv_data_len
+ cp
->scan_rsp_len
)
8750 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8751 MGMT_STATUS_INVALID_PARAMS
);
8753 flags
= __le32_to_cpu(cp
->flags
);
8754 timeout
= __le16_to_cpu(cp
->timeout
);
8755 duration
= __le16_to_cpu(cp
->duration
);
8757 if (!requested_adv_flags_are_valid(hdev
, flags
))
8758 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8759 MGMT_STATUS_INVALID_PARAMS
);
8763 if (timeout
&& !hdev_is_powered(hdev
)) {
8764 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8765 MGMT_STATUS_REJECTED
);
8769 if (adv_busy(hdev
)) {
8770 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8775 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
8776 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
8777 cp
->scan_rsp_len
, false)) {
8778 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8779 MGMT_STATUS_INVALID_PARAMS
);
8783 prev_instance_cnt
= hdev
->adv_instance_cnt
;
8785 adv
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
8786 cp
->adv_data_len
, cp
->data
,
8788 cp
->data
+ cp
->adv_data_len
,
8790 HCI_ADV_TX_POWER_NO_PREFERENCE
,
8791 hdev
->le_adv_min_interval
,
8792 hdev
->le_adv_max_interval
, 0);
8794 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8795 MGMT_STATUS_FAILED
);
8799 /* Only trigger an advertising added event if a new instance was
8802 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
8803 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
8805 if (hdev
->cur_adv_instance
== cp
->instance
) {
8806 /* If the currently advertised instance is being changed then
8807 * cancel the current advertising and schedule the next
8808 * instance. If there is only one instance then the overridden
8809 * advertising data will be visible right away.
8811 cancel_adv_timeout(hdev
);
8813 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
8815 schedule_instance
= next_instance
->instance
;
8816 } else if (!hdev
->adv_instance_timeout
) {
8817 /* Immediately advertise the new instance if no other
8818 * instance is currently being advertised.
8820 schedule_instance
= cp
->instance
;
8823 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8824 * there is no instance to be advertised then we have no HCI
8825 * communication to make. Simply return.
8827 if (!hdev_is_powered(hdev
) ||
8828 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
8829 !schedule_instance
) {
8830 rp
.instance
= cp
->instance
;
8831 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
8832 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
8836 /* We're good to go, update advertising data, parameters, and start
8839 cmd
= mgmt_pending_new(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
8846 cp
->instance
= schedule_instance
;
8848 err
= hci_cmd_sync_queue(hdev
, add_advertising_sync
, cmd
,
8849 add_advertising_complete
);
8851 mgmt_pending_free(cmd
);
8854 hci_dev_unlock(hdev
);
8859 static void add_ext_adv_params_complete(struct hci_dev
*hdev
, void *data
,
8862 struct mgmt_pending_cmd
*cmd
= data
;
8863 struct mgmt_cp_add_ext_adv_params
*cp
= cmd
->param
;
8864 struct mgmt_rp_add_ext_adv_params rp
;
8865 struct adv_info
*adv
;
8868 BT_DBG("%s", hdev
->name
);
8872 adv
= hci_find_adv_instance(hdev
, cp
->instance
);
8876 rp
.instance
= cp
->instance
;
8877 rp
.tx_power
= adv
->tx_power
;
8879 /* While we're at it, inform userspace of the available space for this
8880 * advertisement, given the flags that will be used.
8882 flags
= __le32_to_cpu(cp
->flags
);
8883 rp
.max_adv_data_len
= tlv_data_max_len(hdev
, flags
, true);
8884 rp
.max_scan_rsp_len
= tlv_data_max_len(hdev
, flags
, false);
8887 /* If this advertisement was previously advertising and we
8888 * failed to update it, we signal that it has been removed and
8889 * delete its structure
8892 mgmt_advertising_removed(cmd
->sk
, hdev
, cp
->instance
);
8894 hci_remove_adv_instance(hdev
, cp
->instance
);
8896 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
8899 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
8900 mgmt_status(err
), &rp
, sizeof(rp
));
8904 mgmt_pending_free(cmd
);
8906 hci_dev_unlock(hdev
);
8909 static int add_ext_adv_params_sync(struct hci_dev
*hdev
, void *data
)
8911 struct mgmt_pending_cmd
*cmd
= data
;
8912 struct mgmt_cp_add_ext_adv_params
*cp
= cmd
->param
;
8914 return hci_setup_ext_adv_instance_sync(hdev
, cp
->instance
);
8917 static int add_ext_adv_params(struct sock
*sk
, struct hci_dev
*hdev
,
8918 void *data
, u16 data_len
)
8920 struct mgmt_cp_add_ext_adv_params
*cp
= data
;
8921 struct mgmt_rp_add_ext_adv_params rp
;
8922 struct mgmt_pending_cmd
*cmd
= NULL
;
8923 struct adv_info
*adv
;
8924 u32 flags
, min_interval
, max_interval
;
8925 u16 timeout
, duration
;
8930 BT_DBG("%s", hdev
->name
);
8932 status
= mgmt_le_support(hdev
);
8934 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8937 if (cp
->instance
< 1 || cp
->instance
> hdev
->le_num_of_adv_sets
)
8938 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8939 MGMT_STATUS_INVALID_PARAMS
);
8941 /* The purpose of breaking add_advertising into two separate MGMT calls
8942 * for params and data is to allow more parameters to be added to this
8943 * structure in the future. For this reason, we verify that we have the
8944 * bare minimum structure we know of when the interface was defined. Any
8945 * extra parameters we don't know about will be ignored in this request.
8947 if (data_len
< MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE
)
8948 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8949 MGMT_STATUS_INVALID_PARAMS
);
8951 flags
= __le32_to_cpu(cp
->flags
);
8953 if (!requested_adv_flags_are_valid(hdev
, flags
))
8954 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8955 MGMT_STATUS_INVALID_PARAMS
);
8959 /* In new interface, we require that we are powered to register */
8960 if (!hdev_is_powered(hdev
)) {
8961 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8962 MGMT_STATUS_REJECTED
);
8966 if (adv_busy(hdev
)) {
8967 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8972 /* Parse defined parameters from request, use defaults otherwise */
8973 timeout
= (flags
& MGMT_ADV_PARAM_TIMEOUT
) ?
8974 __le16_to_cpu(cp
->timeout
) : 0;
8976 duration
= (flags
& MGMT_ADV_PARAM_DURATION
) ?
8977 __le16_to_cpu(cp
->duration
) :
8978 hdev
->def_multi_adv_rotation_duration
;
8980 min_interval
= (flags
& MGMT_ADV_PARAM_INTERVALS
) ?
8981 __le32_to_cpu(cp
->min_interval
) :
8982 hdev
->le_adv_min_interval
;
8984 max_interval
= (flags
& MGMT_ADV_PARAM_INTERVALS
) ?
8985 __le32_to_cpu(cp
->max_interval
) :
8986 hdev
->le_adv_max_interval
;
8988 tx_power
= (flags
& MGMT_ADV_PARAM_TX_POWER
) ?
8990 HCI_ADV_TX_POWER_NO_PREFERENCE
;
8992 /* Create advertising instance with no advertising or response data */
8993 adv
= hci_add_adv_instance(hdev
, cp
->instance
, flags
, 0, NULL
, 0, NULL
,
8994 timeout
, duration
, tx_power
, min_interval
,
8998 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
8999 MGMT_STATUS_FAILED
);
9003 /* Submit request for advertising params if ext adv available */
9004 if (ext_adv_capable(hdev
)) {
9005 cmd
= mgmt_pending_new(sk
, MGMT_OP_ADD_EXT_ADV_PARAMS
, hdev
,
9009 hci_remove_adv_instance(hdev
, cp
->instance
);
9013 err
= hci_cmd_sync_queue(hdev
, add_ext_adv_params_sync
, cmd
,
9014 add_ext_adv_params_complete
);
9016 mgmt_pending_free(cmd
);
9018 rp
.instance
= cp
->instance
;
9019 rp
.tx_power
= HCI_ADV_TX_POWER_NO_PREFERENCE
;
9020 rp
.max_adv_data_len
= tlv_data_max_len(hdev
, flags
, true);
9021 rp
.max_scan_rsp_len
= tlv_data_max_len(hdev
, flags
, false);
9022 err
= mgmt_cmd_complete(sk
, hdev
->id
,
9023 MGMT_OP_ADD_EXT_ADV_PARAMS
,
9024 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
9028 hci_dev_unlock(hdev
);
9033 static void add_ext_adv_data_complete(struct hci_dev
*hdev
, void *data
, int err
)
9035 struct mgmt_pending_cmd
*cmd
= data
;
9036 struct mgmt_cp_add_ext_adv_data
*cp
= cmd
->param
;
9037 struct mgmt_rp_add_advertising rp
;
9039 add_adv_complete(hdev
, cmd
->sk
, cp
->instance
, err
);
9041 memset(&rp
, 0, sizeof(rp
));
9043 rp
.instance
= cp
->instance
;
9046 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
9049 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
9050 mgmt_status(err
), &rp
, sizeof(rp
));
9052 mgmt_pending_free(cmd
);
9055 static int add_ext_adv_data_sync(struct hci_dev
*hdev
, void *data
)
9057 struct mgmt_pending_cmd
*cmd
= data
;
9058 struct mgmt_cp_add_ext_adv_data
*cp
= cmd
->param
;
9061 if (ext_adv_capable(hdev
)) {
9062 err
= hci_update_adv_data_sync(hdev
, cp
->instance
);
9066 err
= hci_update_scan_rsp_data_sync(hdev
, cp
->instance
);
9070 return hci_enable_ext_advertising_sync(hdev
, cp
->instance
);
9073 return hci_schedule_adv_instance_sync(hdev
, cp
->instance
, true);
9076 static int add_ext_adv_data(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
9079 struct mgmt_cp_add_ext_adv_data
*cp
= data
;
9080 struct mgmt_rp_add_ext_adv_data rp
;
9081 u8 schedule_instance
= 0;
9082 struct adv_info
*next_instance
;
9083 struct adv_info
*adv_instance
;
9085 struct mgmt_pending_cmd
*cmd
;
9087 BT_DBG("%s", hdev
->name
);
9091 adv_instance
= hci_find_adv_instance(hdev
, cp
->instance
);
9093 if (!adv_instance
) {
9094 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
9095 MGMT_STATUS_INVALID_PARAMS
);
9099 /* In new interface, we require that we are powered to register */
9100 if (!hdev_is_powered(hdev
)) {
9101 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
9102 MGMT_STATUS_REJECTED
);
9103 goto clear_new_instance
;
9106 if (adv_busy(hdev
)) {
9107 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
9109 goto clear_new_instance
;
9112 /* Validate new data */
9113 if (!tlv_data_is_valid(hdev
, adv_instance
->flags
, cp
->data
,
9114 cp
->adv_data_len
, true) ||
9115 !tlv_data_is_valid(hdev
, adv_instance
->flags
, cp
->data
+
9116 cp
->adv_data_len
, cp
->scan_rsp_len
, false)) {
9117 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
9118 MGMT_STATUS_INVALID_PARAMS
);
9119 goto clear_new_instance
;
9122 /* Set the data in the advertising instance */
9123 hci_set_adv_instance_data(hdev
, cp
->instance
, cp
->adv_data_len
,
9124 cp
->data
, cp
->scan_rsp_len
,
9125 cp
->data
+ cp
->adv_data_len
);
9127 /* If using software rotation, determine next instance to use */
9128 if (hdev
->cur_adv_instance
== cp
->instance
) {
9129 /* If the currently advertised instance is being changed
9130 * then cancel the current advertising and schedule the
9131 * next instance. If there is only one instance then the
9132 * overridden advertising data will be visible right
9135 cancel_adv_timeout(hdev
);
9137 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
9139 schedule_instance
= next_instance
->instance
;
9140 } else if (!hdev
->adv_instance_timeout
) {
9141 /* Immediately advertise the new instance if no other
9142 * instance is currently being advertised.
9144 schedule_instance
= cp
->instance
;
9147 /* If the HCI_ADVERTISING flag is set or there is no instance to
9148 * be advertised then we have no HCI communication to make.
9151 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) || !schedule_instance
) {
9152 if (adv_instance
->pending
) {
9153 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
9154 adv_instance
->pending
= false;
9156 rp
.instance
= cp
->instance
;
9157 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
9158 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
9162 cmd
= mgmt_pending_new(sk
, MGMT_OP_ADD_EXT_ADV_DATA
, hdev
, data
,
9166 goto clear_new_instance
;
9169 err
= hci_cmd_sync_queue(hdev
, add_ext_adv_data_sync
, cmd
,
9170 add_ext_adv_data_complete
);
9172 mgmt_pending_free(cmd
);
9173 goto clear_new_instance
;
9176 /* We were successful in updating data, so trigger advertising_added
9177 * event if this is an instance that wasn't previously advertising. If
9178 * a failure occurs in the requests we initiated, we will remove the
9179 * instance again in add_advertising_complete
9181 if (adv_instance
->pending
)
9182 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
9187 hci_remove_adv_instance(hdev
, cp
->instance
);
9190 hci_dev_unlock(hdev
);
9195 static void remove_advertising_complete(struct hci_dev
*hdev
, void *data
,
9198 struct mgmt_pending_cmd
*cmd
= data
;
9199 struct mgmt_cp_remove_advertising
*cp
= cmd
->param
;
9200 struct mgmt_rp_remove_advertising rp
;
9202 bt_dev_dbg(hdev
, "err %d", err
);
9204 memset(&rp
, 0, sizeof(rp
));
9205 rp
.instance
= cp
->instance
;
9208 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
9211 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
9212 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
9214 mgmt_pending_free(cmd
);
9217 static int remove_advertising_sync(struct hci_dev
*hdev
, void *data
)
9219 struct mgmt_pending_cmd
*cmd
= data
;
9220 struct mgmt_cp_remove_advertising
*cp
= cmd
->param
;
9223 err
= hci_remove_advertising_sync(hdev
, cmd
->sk
, cp
->instance
, true);
9227 if (list_empty(&hdev
->adv_instances
))
9228 err
= hci_disable_advertising_sync(hdev
);
9233 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
9234 void *data
, u16 data_len
)
9236 struct mgmt_cp_remove_advertising
*cp
= data
;
9237 struct mgmt_pending_cmd
*cmd
;
9240 bt_dev_dbg(hdev
, "sock %p", sk
);
9244 if (cp
->instance
&& !hci_find_adv_instance(hdev
, cp
->instance
)) {
9245 err
= mgmt_cmd_status(sk
, hdev
->id
,
9246 MGMT_OP_REMOVE_ADVERTISING
,
9247 MGMT_STATUS_INVALID_PARAMS
);
9251 if (pending_find(MGMT_OP_SET_LE
, hdev
)) {
9252 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
9257 if (list_empty(&hdev
->adv_instances
)) {
9258 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
9259 MGMT_STATUS_INVALID_PARAMS
);
9263 cmd
= mgmt_pending_new(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
9270 err
= hci_cmd_sync_queue(hdev
, remove_advertising_sync
, cmd
,
9271 remove_advertising_complete
);
9273 mgmt_pending_free(cmd
);
9276 hci_dev_unlock(hdev
);
9281 static int get_adv_size_info(struct sock
*sk
, struct hci_dev
*hdev
,
9282 void *data
, u16 data_len
)
9284 struct mgmt_cp_get_adv_size_info
*cp
= data
;
9285 struct mgmt_rp_get_adv_size_info rp
;
9286 u32 flags
, supported_flags
;
9288 bt_dev_dbg(hdev
, "sock %p", sk
);
9290 if (!lmp_le_capable(hdev
))
9291 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
9292 MGMT_STATUS_REJECTED
);
9294 if (cp
->instance
< 1 || cp
->instance
> hdev
->le_num_of_adv_sets
)
9295 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
9296 MGMT_STATUS_INVALID_PARAMS
);
9298 flags
= __le32_to_cpu(cp
->flags
);
9300 /* The current implementation only supports a subset of the specified
9303 supported_flags
= get_supported_adv_flags(hdev
);
9304 if (flags
& ~supported_flags
)
9305 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
9306 MGMT_STATUS_INVALID_PARAMS
);
9308 rp
.instance
= cp
->instance
;
9309 rp
.flags
= cp
->flags
;
9310 rp
.max_adv_data_len
= tlv_data_max_len(hdev
, flags
, true);
9311 rp
.max_scan_rsp_len
= tlv_data_max_len(hdev
, flags
, false);
9313 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
9314 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
9317 static const struct hci_mgmt_handler mgmt_handlers
[] = {
9318 { NULL
}, /* 0x0000 (no command) */
9319 { read_version
, MGMT_READ_VERSION_SIZE
,
9321 HCI_MGMT_UNTRUSTED
},
9322 { read_commands
, MGMT_READ_COMMANDS_SIZE
,
9324 HCI_MGMT_UNTRUSTED
},
9325 { read_index_list
, MGMT_READ_INDEX_LIST_SIZE
,
9327 HCI_MGMT_UNTRUSTED
},
9328 { read_controller_info
, MGMT_READ_INFO_SIZE
,
9329 HCI_MGMT_UNTRUSTED
},
9330 { set_powered
, MGMT_SETTING_SIZE
},
9331 { set_discoverable
, MGMT_SET_DISCOVERABLE_SIZE
},
9332 { set_connectable
, MGMT_SETTING_SIZE
},
9333 { set_fast_connectable
, MGMT_SETTING_SIZE
},
9334 { set_bondable
, MGMT_SETTING_SIZE
},
9335 { set_link_security
, MGMT_SETTING_SIZE
},
9336 { set_ssp
, MGMT_SETTING_SIZE
},
9337 { set_hs
, MGMT_SETTING_SIZE
},
9338 { set_le
, MGMT_SETTING_SIZE
},
9339 { set_dev_class
, MGMT_SET_DEV_CLASS_SIZE
},
9340 { set_local_name
, MGMT_SET_LOCAL_NAME_SIZE
},
9341 { add_uuid
, MGMT_ADD_UUID_SIZE
},
9342 { remove_uuid
, MGMT_REMOVE_UUID_SIZE
},
9343 { load_link_keys
, MGMT_LOAD_LINK_KEYS_SIZE
,
9345 { load_long_term_keys
, MGMT_LOAD_LONG_TERM_KEYS_SIZE
,
9347 { disconnect
, MGMT_DISCONNECT_SIZE
},
9348 { get_connections
, MGMT_GET_CONNECTIONS_SIZE
},
9349 { pin_code_reply
, MGMT_PIN_CODE_REPLY_SIZE
},
9350 { pin_code_neg_reply
, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
9351 { set_io_capability
, MGMT_SET_IO_CAPABILITY_SIZE
},
9352 { pair_device
, MGMT_PAIR_DEVICE_SIZE
},
9353 { cancel_pair_device
, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
9354 { unpair_device
, MGMT_UNPAIR_DEVICE_SIZE
},
9355 { user_confirm_reply
, MGMT_USER_CONFIRM_REPLY_SIZE
},
9356 { user_confirm_neg_reply
, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
9357 { user_passkey_reply
, MGMT_USER_PASSKEY_REPLY_SIZE
},
9358 { user_passkey_neg_reply
, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
9359 { read_local_oob_data
, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
9360 { add_remote_oob_data
, MGMT_ADD_REMOTE_OOB_DATA_SIZE
,
9362 { remove_remote_oob_data
, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
9363 { start_discovery
, MGMT_START_DISCOVERY_SIZE
},
9364 { stop_discovery
, MGMT_STOP_DISCOVERY_SIZE
},
9365 { confirm_name
, MGMT_CONFIRM_NAME_SIZE
},
9366 { block_device
, MGMT_BLOCK_DEVICE_SIZE
},
9367 { unblock_device
, MGMT_UNBLOCK_DEVICE_SIZE
},
9368 { set_device_id
, MGMT_SET_DEVICE_ID_SIZE
},
9369 { set_advertising
, MGMT_SETTING_SIZE
},
9370 { set_bredr
, MGMT_SETTING_SIZE
},
9371 { set_static_address
, MGMT_SET_STATIC_ADDRESS_SIZE
},
9372 { set_scan_params
, MGMT_SET_SCAN_PARAMS_SIZE
},
9373 { set_secure_conn
, MGMT_SETTING_SIZE
},
9374 { set_debug_keys
, MGMT_SETTING_SIZE
},
9375 { set_privacy
, MGMT_SET_PRIVACY_SIZE
},
9376 { load_irks
, MGMT_LOAD_IRKS_SIZE
,
9378 { get_conn_info
, MGMT_GET_CONN_INFO_SIZE
},
9379 { get_clock_info
, MGMT_GET_CLOCK_INFO_SIZE
},
9380 { add_device
, MGMT_ADD_DEVICE_SIZE
},
9381 { remove_device
, MGMT_REMOVE_DEVICE_SIZE
},
9382 { load_conn_param
, MGMT_LOAD_CONN_PARAM_SIZE
,
9384 { read_unconf_index_list
, MGMT_READ_UNCONF_INDEX_LIST_SIZE
,
9386 HCI_MGMT_UNTRUSTED
},
9387 { read_config_info
, MGMT_READ_CONFIG_INFO_SIZE
,
9388 HCI_MGMT_UNCONFIGURED
|
9389 HCI_MGMT_UNTRUSTED
},
9390 { set_external_config
, MGMT_SET_EXTERNAL_CONFIG_SIZE
,
9391 HCI_MGMT_UNCONFIGURED
},
9392 { set_public_address
, MGMT_SET_PUBLIC_ADDRESS_SIZE
,
9393 HCI_MGMT_UNCONFIGURED
},
9394 { start_service_discovery
, MGMT_START_SERVICE_DISCOVERY_SIZE
,
9396 { read_local_oob_ext_data
, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE
},
9397 { read_ext_index_list
, MGMT_READ_EXT_INDEX_LIST_SIZE
,
9399 HCI_MGMT_UNTRUSTED
},
9400 { read_adv_features
, MGMT_READ_ADV_FEATURES_SIZE
},
9401 { add_advertising
, MGMT_ADD_ADVERTISING_SIZE
,
9403 { remove_advertising
, MGMT_REMOVE_ADVERTISING_SIZE
},
9404 { get_adv_size_info
, MGMT_GET_ADV_SIZE_INFO_SIZE
},
9405 { start_limited_discovery
, MGMT_START_DISCOVERY_SIZE
},
9406 { read_ext_controller_info
,MGMT_READ_EXT_INFO_SIZE
,
9407 HCI_MGMT_UNTRUSTED
},
9408 { set_appearance
, MGMT_SET_APPEARANCE_SIZE
},
9409 { get_phy_configuration
, MGMT_GET_PHY_CONFIGURATION_SIZE
},
9410 { set_phy_configuration
, MGMT_SET_PHY_CONFIGURATION_SIZE
},
9411 { set_blocked_keys
, MGMT_OP_SET_BLOCKED_KEYS_SIZE
,
9413 { set_wideband_speech
, MGMT_SETTING_SIZE
},
9414 { read_controller_cap
, MGMT_READ_CONTROLLER_CAP_SIZE
,
9415 HCI_MGMT_UNTRUSTED
},
9416 { read_exp_features_info
, MGMT_READ_EXP_FEATURES_INFO_SIZE
,
9417 HCI_MGMT_UNTRUSTED
|
9418 HCI_MGMT_HDEV_OPTIONAL
},
9419 { set_exp_feature
, MGMT_SET_EXP_FEATURE_SIZE
,
9421 HCI_MGMT_HDEV_OPTIONAL
},
9422 { read_def_system_config
, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE
,
9423 HCI_MGMT_UNTRUSTED
},
9424 { set_def_system_config
, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE
,
9426 { read_def_runtime_config
, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE
,
9427 HCI_MGMT_UNTRUSTED
},
9428 { set_def_runtime_config
, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE
,
9430 { get_device_flags
, MGMT_GET_DEVICE_FLAGS_SIZE
},
9431 { set_device_flags
, MGMT_SET_DEVICE_FLAGS_SIZE
},
9432 { read_adv_mon_features
, MGMT_READ_ADV_MONITOR_FEATURES_SIZE
},
9433 { add_adv_patterns_monitor
,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE
,
9435 { remove_adv_monitor
, MGMT_REMOVE_ADV_MONITOR_SIZE
},
9436 { add_ext_adv_params
, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE
,
9438 { add_ext_adv_data
, MGMT_ADD_EXT_ADV_DATA_SIZE
,
9440 { add_adv_patterns_monitor_rssi
,
9441 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE
,
9443 { set_mesh
, MGMT_SET_MESH_RECEIVER_SIZE
,
9445 { mesh_features
, MGMT_MESH_READ_FEATURES_SIZE
},
9446 { mesh_send
, MGMT_MESH_SEND_SIZE
,
9448 { mesh_send_cancel
, MGMT_MESH_SEND_CANCEL_SIZE
},
9449 { mgmt_hci_cmd_sync
, MGMT_HCI_CMD_SYNC_SIZE
, HCI_MGMT_VAR_LEN
},
9452 void mgmt_index_added(struct hci_dev
*hdev
)
9454 struct mgmt_ev_ext_index ev
;
9456 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
9459 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
9460 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
, NULL
, 0,
9461 HCI_MGMT_UNCONF_INDEX_EVENTS
);
9464 mgmt_index_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0,
9465 HCI_MGMT_INDEX_EVENTS
);
9471 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED
, hdev
, &ev
, sizeof(ev
),
9472 HCI_MGMT_EXT_INDEX_EVENTS
);
9475 void mgmt_index_removed(struct hci_dev
*hdev
)
9477 struct mgmt_ev_ext_index ev
;
9478 struct cmd_lookup match
= { NULL
, hdev
, MGMT_STATUS_INVALID_INDEX
};
9480 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
9483 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &match
);
9485 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
9486 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
, NULL
, 0,
9487 HCI_MGMT_UNCONF_INDEX_EVENTS
);
9490 mgmt_index_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0,
9491 HCI_MGMT_INDEX_EVENTS
);
9497 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED
, hdev
, &ev
, sizeof(ev
),
9498 HCI_MGMT_EXT_INDEX_EVENTS
);
9500 /* Cancel any remaining timed work */
9501 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
9503 cancel_delayed_work_sync(&hdev
->discov_off
);
9504 cancel_delayed_work_sync(&hdev
->service_cache
);
9505 cancel_delayed_work_sync(&hdev
->rpa_expired
);
9508 void mgmt_power_on(struct hci_dev
*hdev
, int err
)
9510 struct cmd_lookup match
= { NULL
, hdev
};
9512 bt_dev_dbg(hdev
, "err %d", err
);
9517 restart_le_actions(hdev
);
9518 hci_update_passive_scan(hdev
);
9521 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
9523 new_settings(hdev
, match
.sk
);
9528 hci_dev_unlock(hdev
);
9531 void __mgmt_power_off(struct hci_dev
*hdev
)
9533 struct cmd_lookup match
= { NULL
, hdev
};
9534 u8 zero_cod
[] = { 0, 0, 0 };
9536 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
9538 /* If the power off is because of hdev unregistration let
9539 * use the appropriate INVALID_INDEX status. Otherwise use
9540 * NOT_POWERED. We cover both scenarios here since later in
9541 * mgmt_index_removed() any hci_conn callbacks will have already
9542 * been triggered, potentially causing misleading DISCONNECTED
9545 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
9546 match
.mgmt_status
= MGMT_STATUS_INVALID_INDEX
;
9548 match
.mgmt_status
= MGMT_STATUS_NOT_POWERED
;
9550 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &match
);
9552 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0) {
9553 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
9554 zero_cod
, sizeof(zero_cod
),
9555 HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
9556 ext_info_changed(hdev
, NULL
);
9559 new_settings(hdev
, match
.sk
);
9565 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
9567 struct mgmt_pending_cmd
*cmd
;
9570 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
9574 if (err
== -ERFKILL
)
9575 status
= MGMT_STATUS_RFKILLED
;
9577 status
= MGMT_STATUS_FAILED
;
9579 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
9581 mgmt_pending_remove(cmd
);
9584 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
9587 struct mgmt_ev_new_link_key ev
;
9589 memset(&ev
, 0, sizeof(ev
));
9591 ev
.store_hint
= persistent
;
9592 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
9593 ev
.key
.addr
.type
= BDADDR_BREDR
;
9594 ev
.key
.type
= key
->type
;
9595 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
9596 ev
.key
.pin_len
= key
->pin_len
;
9598 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
9601 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
9603 switch (ltk
->type
) {
9605 case SMP_LTK_RESPONDER
:
9606 if (ltk
->authenticated
)
9607 return MGMT_LTK_AUTHENTICATED
;
9608 return MGMT_LTK_UNAUTHENTICATED
;
9610 if (ltk
->authenticated
)
9611 return MGMT_LTK_P256_AUTH
;
9612 return MGMT_LTK_P256_UNAUTH
;
9613 case SMP_LTK_P256_DEBUG
:
9614 return MGMT_LTK_P256_DEBUG
;
9617 return MGMT_LTK_UNAUTHENTICATED
;
9620 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
9622 struct mgmt_ev_new_long_term_key ev
;
9624 memset(&ev
, 0, sizeof(ev
));
9626 /* Devices using resolvable or non-resolvable random addresses
9627 * without providing an identity resolving key don't require
9628 * to store long term keys. Their addresses will change the
9631 * Only when a remote device provides an identity address
9632 * make sure the long term key is stored. If the remote
9633 * identity is known, the long term keys are internally
9634 * mapped to the identity address. So allow static random
9635 * and public addresses here.
9637 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
9638 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
9639 ev
.store_hint
= 0x00;
9641 ev
.store_hint
= persistent
;
9643 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
9644 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
9645 ev
.key
.type
= mgmt_ltk_type(key
);
9646 ev
.key
.enc_size
= key
->enc_size
;
9647 ev
.key
.ediv
= key
->ediv
;
9648 ev
.key
.rand
= key
->rand
;
9650 if (key
->type
== SMP_LTK
)
9651 ev
.key
.initiator
= 1;
9653 /* Make sure we copy only the significant bytes based on the
9654 * encryption key size, and set the rest of the value to zeroes.
9656 memcpy(ev
.key
.val
, key
->val
, key
->enc_size
);
9657 memset(ev
.key
.val
+ key
->enc_size
, 0,
9658 sizeof(ev
.key
.val
) - key
->enc_size
);
9660 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
9663 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
, bool persistent
)
9665 struct mgmt_ev_new_irk ev
;
9667 memset(&ev
, 0, sizeof(ev
));
9669 ev
.store_hint
= persistent
;
9671 bacpy(&ev
.rpa
, &irk
->rpa
);
9672 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
9673 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
9674 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
9676 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
9679 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
9682 struct mgmt_ev_new_csrk ev
;
9684 memset(&ev
, 0, sizeof(ev
));
9686 /* Devices using resolvable or non-resolvable random addresses
9687 * without providing an identity resolving key don't require
9688 * to store signature resolving keys. Their addresses will change
9689 * the next time around.
9691 * Only when a remote device provides an identity address
9692 * make sure the signature resolving key is stored. So allow
9693 * static random and public addresses here.
9695 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
9696 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
9697 ev
.store_hint
= 0x00;
9699 ev
.store_hint
= persistent
;
9701 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
9702 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
9703 ev
.key
.type
= csrk
->type
;
9704 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
9706 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
9709 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9710 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
9711 u16 max_interval
, u16 latency
, u16 timeout
)
9713 struct mgmt_ev_new_conn_param ev
;
9715 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
9718 memset(&ev
, 0, sizeof(ev
));
9719 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
9720 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
9721 ev
.store_hint
= store_hint
;
9722 ev
.min_interval
= cpu_to_le16(min_interval
);
9723 ev
.max_interval
= cpu_to_le16(max_interval
);
9724 ev
.latency
= cpu_to_le16(latency
);
9725 ev
.timeout
= cpu_to_le16(timeout
);
9727 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
9730 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
9731 u8
*name
, u8 name_len
)
9733 struct sk_buff
*skb
;
9734 struct mgmt_ev_device_connected
*ev
;
9738 if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
))
9741 /* allocate buff for LE or BR/EDR adv */
9742 if (conn
->le_adv_data_len
> 0)
9743 skb
= mgmt_alloc_skb(hdev
, MGMT_EV_DEVICE_CONNECTED
,
9744 sizeof(*ev
) + conn
->le_adv_data_len
);
9746 skb
= mgmt_alloc_skb(hdev
, MGMT_EV_DEVICE_CONNECTED
,
9747 sizeof(*ev
) + (name
? eir_precalc_len(name_len
) : 0) +
9748 eir_precalc_len(sizeof(conn
->dev_class
)));
9750 ev
= skb_put(skb
, sizeof(*ev
));
9751 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
9752 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
9755 flags
|= MGMT_DEV_FOUND_INITIATED_CONN
;
9757 ev
->flags
= __cpu_to_le32(flags
);
9759 /* We must ensure that the EIR Data fields are ordered and
9760 * unique. Keep it simple for now and avoid the problem by not
9761 * adding any BR/EDR data to the LE adv.
9763 if (conn
->le_adv_data_len
> 0) {
9764 skb_put_data(skb
, conn
->le_adv_data
, conn
->le_adv_data_len
);
9765 eir_len
= conn
->le_adv_data_len
;
9768 eir_len
+= eir_skb_put_data(skb
, EIR_NAME_COMPLETE
, name
, name_len
);
9770 if (memcmp(conn
->dev_class
, "\0\0\0", sizeof(conn
->dev_class
)))
9771 eir_len
+= eir_skb_put_data(skb
, EIR_CLASS_OF_DEV
,
9772 conn
->dev_class
, sizeof(conn
->dev_class
));
9775 ev
->eir_len
= cpu_to_le16(eir_len
);
9777 mgmt_event_skb(skb
, NULL
);
9780 static void unpair_device_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
9782 struct hci_dev
*hdev
= data
;
9783 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
9785 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
9787 cmd
->cmd_complete(cmd
, 0);
9788 mgmt_pending_remove(cmd
);
9791 bool mgmt_powering_down(struct hci_dev
*hdev
)
9793 struct mgmt_pending_cmd
*cmd
;
9794 struct mgmt_mode
*cp
;
9796 if (hci_dev_test_flag(hdev
, HCI_POWERING_DOWN
))
9799 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
9810 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9811 u8 link_type
, u8 addr_type
, u8 reason
,
9812 bool mgmt_connected
)
9814 struct mgmt_ev_device_disconnected ev
;
9815 struct sock
*sk
= NULL
;
9817 if (!mgmt_connected
)
9820 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
9823 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
9824 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
9827 /* Report disconnects due to suspend */
9828 if (hdev
->suspended
)
9829 ev
.reason
= MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND
;
9831 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
9837 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9838 u8 link_type
, u8 addr_type
, u8 status
)
9840 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
9841 struct mgmt_cp_disconnect
*cp
;
9842 struct mgmt_pending_cmd
*cmd
;
9844 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
9847 cmd
= pending_find(MGMT_OP_DISCONNECT
, hdev
);
9853 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
9856 if (cp
->addr
.type
!= bdaddr_type
)
9859 cmd
->cmd_complete(cmd
, mgmt_status(status
));
9860 mgmt_pending_remove(cmd
);
9863 void mgmt_connect_failed(struct hci_dev
*hdev
, struct hci_conn
*conn
, u8 status
)
9865 struct mgmt_ev_connect_failed ev
;
9867 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
)) {
9868 mgmt_device_disconnected(hdev
, &conn
->dst
, conn
->type
,
9869 conn
->dst_type
, status
, true);
9873 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
9874 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
9875 ev
.status
= mgmt_status(status
);
9877 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
9880 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
9882 struct mgmt_ev_pin_code_request ev
;
9884 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
9885 ev
.addr
.type
= BDADDR_BREDR
;
9888 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
9891 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9894 struct mgmt_pending_cmd
*cmd
;
9896 cmd
= pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
9900 cmd
->cmd_complete(cmd
, mgmt_status(status
));
9901 mgmt_pending_remove(cmd
);
9904 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9907 struct mgmt_pending_cmd
*cmd
;
9909 cmd
= pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
9913 cmd
->cmd_complete(cmd
, mgmt_status(status
));
9914 mgmt_pending_remove(cmd
);
9917 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9918 u8 link_type
, u8 addr_type
, u32 value
,
9921 struct mgmt_ev_user_confirm_request ev
;
9923 bt_dev_dbg(hdev
, "bdaddr %pMR", bdaddr
);
9925 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
9926 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
9927 ev
.confirm_hint
= confirm_hint
;
9928 ev
.value
= cpu_to_le32(value
);
9930 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
9934 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9935 u8 link_type
, u8 addr_type
)
9937 struct mgmt_ev_user_passkey_request ev
;
9939 bt_dev_dbg(hdev
, "bdaddr %pMR", bdaddr
);
9941 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
9942 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
9944 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
9948 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9949 u8 link_type
, u8 addr_type
, u8 status
,
9952 struct mgmt_pending_cmd
*cmd
;
9954 cmd
= pending_find(opcode
, hdev
);
9958 cmd
->cmd_complete(cmd
, mgmt_status(status
));
9959 mgmt_pending_remove(cmd
);
9964 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9965 u8 link_type
, u8 addr_type
, u8 status
)
9967 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
9968 status
, MGMT_OP_USER_CONFIRM_REPLY
);
9971 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9972 u8 link_type
, u8 addr_type
, u8 status
)
9974 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
9976 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
9979 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9980 u8 link_type
, u8 addr_type
, u8 status
)
9982 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
9983 status
, MGMT_OP_USER_PASSKEY_REPLY
);
9986 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9987 u8 link_type
, u8 addr_type
, u8 status
)
9989 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
9991 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
9994 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
9995 u8 link_type
, u8 addr_type
, u32 passkey
,
9998 struct mgmt_ev_passkey_notify ev
;
10000 bt_dev_dbg(hdev
, "bdaddr %pMR", bdaddr
);
10002 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
10003 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
10004 ev
.passkey
= __cpu_to_le32(passkey
);
10005 ev
.entered
= entered
;
10007 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
10010 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
10012 struct mgmt_ev_auth_failed ev
;
10013 struct mgmt_pending_cmd
*cmd
;
10014 u8 status
= mgmt_status(hci_status
);
10016 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
10017 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
10018 ev
.status
= status
;
10020 cmd
= find_pairing(conn
);
10022 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
10023 cmd
? cmd
->sk
: NULL
);
10026 cmd
->cmd_complete(cmd
, status
);
10027 mgmt_pending_remove(cmd
);
10031 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
10033 struct cmd_lookup match
= { NULL
, hdev
};
10037 u8 mgmt_err
= mgmt_status(status
);
10038 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
10039 cmd_status_rsp
, &mgmt_err
);
10043 if (test_bit(HCI_AUTH
, &hdev
->flags
))
10044 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_LINK_SECURITY
);
10046 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_LINK_SECURITY
);
10048 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
10052 new_settings(hdev
, match
.sk
);
10055 sock_put(match
.sk
);
10058 static void sk_lookup(struct mgmt_pending_cmd
*cmd
, void *data
)
10060 struct cmd_lookup
*match
= data
;
10062 if (match
->sk
== NULL
) {
10063 match
->sk
= cmd
->sk
;
10064 sock_hold(match
->sk
);
10068 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
10071 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
10073 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
10074 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
10075 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
10078 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
,
10079 3, HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
10080 ext_info_changed(hdev
, NULL
);
10084 sock_put(match
.sk
);
10087 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
10089 struct mgmt_cp_set_local_name ev
;
10090 struct mgmt_pending_cmd
*cmd
;
10095 memset(&ev
, 0, sizeof(ev
));
10096 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
10097 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
10099 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
10101 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
10103 /* If this is a HCI command related to powering on the
10104 * HCI dev don't send any mgmt signals.
10106 if (hci_dev_test_flag(hdev
, HCI_POWERING_DOWN
))
10109 if (pending_find(MGMT_OP_SET_POWERED
, hdev
))
10113 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
10114 HCI_MGMT_LOCAL_NAME_EVENTS
, cmd
? cmd
->sk
: NULL
);
10115 ext_info_changed(hdev
, cmd
? cmd
->sk
: NULL
);
10118 static inline bool has_uuid(u8
*uuid
, u16 uuid_count
, u8 (*uuids
)[16])
10122 for (i
= 0; i
< uuid_count
; i
++) {
10123 if (!memcmp(uuid
, uuids
[i
], 16))
10130 static bool eir_has_uuids(u8
*eir
, u16 eir_len
, u16 uuid_count
, u8 (*uuids
)[16])
10134 while (parsed
< eir_len
) {
10135 u8 field_len
= eir
[0];
10139 if (field_len
== 0)
10142 if (eir_len
- parsed
< field_len
+ 1)
10146 case EIR_UUID16_ALL
:
10147 case EIR_UUID16_SOME
:
10148 for (i
= 0; i
+ 3 <= field_len
; i
+= 2) {
10149 memcpy(uuid
, bluetooth_base_uuid
, 16);
10150 uuid
[13] = eir
[i
+ 3];
10151 uuid
[12] = eir
[i
+ 2];
10152 if (has_uuid(uuid
, uuid_count
, uuids
))
10156 case EIR_UUID32_ALL
:
10157 case EIR_UUID32_SOME
:
10158 for (i
= 0; i
+ 5 <= field_len
; i
+= 4) {
10159 memcpy(uuid
, bluetooth_base_uuid
, 16);
10160 uuid
[15] = eir
[i
+ 5];
10161 uuid
[14] = eir
[i
+ 4];
10162 uuid
[13] = eir
[i
+ 3];
10163 uuid
[12] = eir
[i
+ 2];
10164 if (has_uuid(uuid
, uuid_count
, uuids
))
10168 case EIR_UUID128_ALL
:
10169 case EIR_UUID128_SOME
:
10170 for (i
= 0; i
+ 17 <= field_len
; i
+= 16) {
10171 memcpy(uuid
, eir
+ i
+ 2, 16);
10172 if (has_uuid(uuid
, uuid_count
, uuids
))
10178 parsed
+= field_len
+ 1;
10179 eir
+= field_len
+ 1;
10185 static bool is_filter_match(struct hci_dev
*hdev
, s8 rssi
, u8
*eir
,
10186 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
10188 /* If a RSSI threshold has been specified, and
10189 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10190 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10191 * is set, let it through for further processing, as we might need to
10192 * restart the scan.
10194 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10195 * the results are also dropped.
10197 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
10198 (rssi
== HCI_RSSI_INVALID
||
10199 (rssi
< hdev
->discovery
.rssi
&&
10200 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
))))
10203 if (hdev
->discovery
.uuid_count
!= 0) {
10204 /* If a list of UUIDs is provided in filter, results with no
10205 * matching UUID should be dropped.
10207 if (!eir_has_uuids(eir
, eir_len
, hdev
->discovery
.uuid_count
,
10208 hdev
->discovery
.uuids
) &&
10209 !eir_has_uuids(scan_rsp
, scan_rsp_len
,
10210 hdev
->discovery
.uuid_count
,
10211 hdev
->discovery
.uuids
))
10215 /* If duplicate filtering does not report RSSI changes, then restart
10216 * scanning to ensure updated result with updated RSSI values.
10218 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
)) {
10219 /* Validate RSSI value against the RSSI threshold once more. */
10220 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
10221 rssi
< hdev
->discovery
.rssi
)
10228 void mgmt_adv_monitor_device_lost(struct hci_dev
*hdev
, u16 handle
,
10229 bdaddr_t
*bdaddr
, u8 addr_type
)
10231 struct mgmt_ev_adv_monitor_device_lost ev
;
10233 ev
.monitor_handle
= cpu_to_le16(handle
);
10234 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
10235 ev
.addr
.type
= addr_type
;
10237 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST
, hdev
, &ev
, sizeof(ev
),
10241 static void mgmt_send_adv_monitor_device_found(struct hci_dev
*hdev
,
10242 struct sk_buff
*skb
,
10243 struct sock
*skip_sk
,
10246 struct sk_buff
*advmon_skb
;
10247 size_t advmon_skb_len
;
10248 __le16
*monitor_handle
;
10253 advmon_skb_len
= (sizeof(struct mgmt_ev_adv_monitor_device_found
) -
10254 sizeof(struct mgmt_ev_device_found
)) + skb
->len
;
10255 advmon_skb
= mgmt_alloc_skb(hdev
, MGMT_EV_ADV_MONITOR_DEVICE_FOUND
,
10260 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10261 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10262 * store monitor_handle of the matched monitor.
10264 monitor_handle
= skb_put(advmon_skb
, sizeof(*monitor_handle
));
10265 *monitor_handle
= cpu_to_le16(handle
);
10266 skb_put_data(advmon_skb
, skb
->data
, skb
->len
);
10268 mgmt_event_skb(advmon_skb
, skip_sk
);
10271 static void mgmt_adv_monitor_device_found(struct hci_dev
*hdev
,
10272 bdaddr_t
*bdaddr
, bool report_device
,
10273 struct sk_buff
*skb
,
10274 struct sock
*skip_sk
)
10276 struct monitored_device
*dev
, *tmp
;
10277 bool matched
= false;
10278 bool notified
= false;
10280 /* We have received the Advertisement Report because:
10281 * 1. the kernel has initiated active discovery
10282 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10284 * 3. if none of the above is true, we have one or more active
10285 * Advertisement Monitor
10287 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10288 * and report ONLY one advertisement per device for the matched Monitor
10289 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10291 * For case 3, since we are not active scanning and all advertisements
10292 * received are due to a matched Advertisement Monitor, report all
10293 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10295 if (report_device
&& !hdev
->advmon_pend_notify
) {
10296 mgmt_event_skb(skb
, skip_sk
);
10300 hdev
->advmon_pend_notify
= false;
10302 list_for_each_entry_safe(dev
, tmp
, &hdev
->monitored_devices
, list
) {
10303 if (!bacmp(&dev
->bdaddr
, bdaddr
)) {
10306 if (!dev
->notified
) {
10307 mgmt_send_adv_monitor_device_found(hdev
, skb
,
10311 dev
->notified
= true;
10315 if (!dev
->notified
)
10316 hdev
->advmon_pend_notify
= true;
10319 if (!report_device
&&
10320 ((matched
&& !notified
) || !msft_monitor_supported(hdev
))) {
10321 /* Handle 0 indicates that we are not active scanning and this
10322 * is a subsequent advertisement report for an already matched
10323 * Advertisement Monitor or the controller offloading support
10324 * is not available.
10326 mgmt_send_adv_monitor_device_found(hdev
, skb
, skip_sk
, 0);
10330 mgmt_event_skb(skb
, skip_sk
);
10335 static void mesh_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
10336 u8 addr_type
, s8 rssi
, u32 flags
, u8
*eir
,
10337 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
,
10340 struct sk_buff
*skb
;
10341 struct mgmt_ev_mesh_device_found
*ev
;
10344 if (!hdev
->mesh_ad_types
[0])
10347 /* Scan for requested AD types */
10349 for (i
= 0; i
+ 1 < eir_len
; i
+= eir
[i
] + 1) {
10350 for (j
= 0; j
< sizeof(hdev
->mesh_ad_types
); j
++) {
10351 if (!hdev
->mesh_ad_types
[j
])
10354 if (hdev
->mesh_ad_types
[j
] == eir
[i
+ 1])
10360 if (scan_rsp_len
> 0) {
10361 for (i
= 0; i
+ 1 < scan_rsp_len
; i
+= scan_rsp
[i
] + 1) {
10362 for (j
= 0; j
< sizeof(hdev
->mesh_ad_types
); j
++) {
10363 if (!hdev
->mesh_ad_types
[j
])
10366 if (hdev
->mesh_ad_types
[j
] == scan_rsp
[i
+ 1])
10375 skb
= mgmt_alloc_skb(hdev
, MGMT_EV_MESH_DEVICE_FOUND
,
10376 sizeof(*ev
) + eir_len
+ scan_rsp_len
);
10380 ev
= skb_put(skb
, sizeof(*ev
));
10382 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
10383 ev
->addr
.type
= link_to_bdaddr(LE_LINK
, addr_type
);
10385 ev
->flags
= cpu_to_le32(flags
);
10386 ev
->instant
= cpu_to_le64(instant
);
10389 /* Copy EIR or advertising data into event */
10390 skb_put_data(skb
, eir
, eir_len
);
10392 if (scan_rsp_len
> 0)
10393 /* Append scan response data to event */
10394 skb_put_data(skb
, scan_rsp
, scan_rsp_len
);
10396 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
10398 mgmt_event_skb(skb
, NULL
);
10401 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
10402 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
10403 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
,
10406 struct sk_buff
*skb
;
10407 struct mgmt_ev_device_found
*ev
;
10408 bool report_device
= hci_discovery_active(hdev
);
10410 if (hci_dev_test_flag(hdev
, HCI_MESH
) && link_type
== LE_LINK
)
10411 mesh_device_found(hdev
, bdaddr
, addr_type
, rssi
, flags
,
10412 eir
, eir_len
, scan_rsp
, scan_rsp_len
,
10415 /* Don't send events for a non-kernel initiated discovery. With
10416 * LE one exception is if we have pend_le_reports > 0 in which
10417 * case we're doing passive scanning and want these events.
10419 if (!hci_discovery_active(hdev
)) {
10420 if (link_type
== ACL_LINK
)
10422 if (link_type
== LE_LINK
&& !list_empty(&hdev
->pend_le_reports
))
10423 report_device
= true;
10424 else if (!hci_is_adv_monitoring(hdev
))
10428 if (hdev
->discovery
.result_filtering
) {
10429 /* We are using service discovery */
10430 if (!is_filter_match(hdev
, rssi
, eir
, eir_len
, scan_rsp
,
10435 if (hdev
->discovery
.limited
) {
10436 /* Check for limited discoverable bit */
10438 if (!(dev_class
[1] & 0x20))
10441 u8
*flags
= eir_get_data(eir
, eir_len
, EIR_FLAGS
, NULL
);
10442 if (!flags
|| !(flags
[0] & LE_AD_LIMITED
))
10447 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10448 skb
= mgmt_alloc_skb(hdev
, MGMT_EV_DEVICE_FOUND
,
10449 sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5);
10453 ev
= skb_put(skb
, sizeof(*ev
));
10455 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10456 * RSSI value was reported as 0 when not available. This behavior
10457 * is kept when using device discovery. This is required for full
10458 * backwards compatibility with the API.
10460 * However when using service discovery, the value 127 will be
10461 * returned when the RSSI is not available.
10463 if (rssi
== HCI_RSSI_INVALID
&& !hdev
->discovery
.report_invalid_rssi
&&
10464 link_type
== ACL_LINK
)
10467 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
10468 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
10470 ev
->flags
= cpu_to_le32(flags
);
10473 /* Copy EIR or advertising data into event */
10474 skb_put_data(skb
, eir
, eir_len
);
10476 if (dev_class
&& !eir_get_data(eir
, eir_len
, EIR_CLASS_OF_DEV
, NULL
)) {
10479 eir_len
+= eir_append_data(eir_cod
, 0, EIR_CLASS_OF_DEV
,
10481 skb_put_data(skb
, eir_cod
, sizeof(eir_cod
));
10484 if (scan_rsp_len
> 0)
10485 /* Append scan response data to event */
10486 skb_put_data(skb
, scan_rsp
, scan_rsp_len
);
10488 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
10490 mgmt_adv_monitor_device_found(hdev
, bdaddr
, report_device
, skb
, NULL
);
10493 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
10494 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
10496 struct sk_buff
*skb
;
10497 struct mgmt_ev_device_found
*ev
;
10501 skb
= mgmt_alloc_skb(hdev
, MGMT_EV_DEVICE_FOUND
,
10502 sizeof(*ev
) + (name
? eir_precalc_len(name_len
) : 0));
10504 ev
= skb_put(skb
, sizeof(*ev
));
10505 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
10506 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
10510 eir_len
+= eir_skb_put_data(skb
, EIR_NAME_COMPLETE
, name
, name_len
);
10512 flags
= MGMT_DEV_FOUND_NAME_REQUEST_FAILED
;
10514 ev
->eir_len
= cpu_to_le16(eir_len
);
10515 ev
->flags
= cpu_to_le32(flags
);
10517 mgmt_event_skb(skb
, NULL
);
10520 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
10522 struct mgmt_ev_discovering ev
;
10524 bt_dev_dbg(hdev
, "discovering %u", discovering
);
10526 memset(&ev
, 0, sizeof(ev
));
10527 ev
.type
= hdev
->discovery
.type
;
10528 ev
.discovering
= discovering
;
10530 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
10533 void mgmt_suspending(struct hci_dev
*hdev
, u8 state
)
10535 struct mgmt_ev_controller_suspend ev
;
10537 ev
.suspend_state
= state
;
10538 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND
, hdev
, &ev
, sizeof(ev
), NULL
);
10541 void mgmt_resuming(struct hci_dev
*hdev
, u8 reason
, bdaddr_t
*bdaddr
,
10544 struct mgmt_ev_controller_resume ev
;
10546 ev
.wake_reason
= reason
;
10548 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
10549 ev
.addr
.type
= addr_type
;
10551 memset(&ev
.addr
, 0, sizeof(ev
.addr
));
10554 mgmt_event(MGMT_EV_CONTROLLER_RESUME
, hdev
, &ev
, sizeof(ev
), NULL
);
10557 static struct hci_mgmt_chan chan
= {
10558 .channel
= HCI_CHANNEL_CONTROL
,
10559 .handler_count
= ARRAY_SIZE(mgmt_handlers
),
10560 .handlers
= mgmt_handlers
,
10561 .hdev_init
= mgmt_init_hdev
,
10564 int mgmt_init(void)
10566 return hci_mgmt_chan_register(&chan
);
10569 void mgmt_exit(void)
10571 hci_mgmt_chan_unregister(&chan
);
10574 void mgmt_cleanup(struct sock
*sk
)
10576 struct mgmt_mesh_tx
*mesh_tx
;
10577 struct hci_dev
*hdev
;
10579 read_lock(&hci_dev_list_lock
);
10581 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
10583 mesh_tx
= mgmt_mesh_next(hdev
, sk
);
10586 mesh_send_complete(hdev
, mesh_tx
, true);
10590 read_unlock(&hci_dev_list_lock
);