Merge tag 'iommu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
[linux/fpc-iii.git] / net / bluetooth / mgmt.c
blobfa0f7a4a1d2fc8a5422a1407bc63407f6ccdb338
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
/* Version of the MGMT protocol implemented by this file. */
#define MGMT_VERSION	1
#define MGMT_REVISION	19
45 static const u16 mgmt_commands[] = {
46 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_READ_INFO,
48 MGMT_OP_SET_POWERED,
49 MGMT_OP_SET_DISCOVERABLE,
50 MGMT_OP_SET_CONNECTABLE,
51 MGMT_OP_SET_FAST_CONNECTABLE,
52 MGMT_OP_SET_BONDABLE,
53 MGMT_OP_SET_LINK_SECURITY,
54 MGMT_OP_SET_SSP,
55 MGMT_OP_SET_HS,
56 MGMT_OP_SET_LE,
57 MGMT_OP_SET_DEV_CLASS,
58 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_ADD_UUID,
60 MGMT_OP_REMOVE_UUID,
61 MGMT_OP_LOAD_LINK_KEYS,
62 MGMT_OP_LOAD_LONG_TERM_KEYS,
63 MGMT_OP_DISCONNECT,
64 MGMT_OP_GET_CONNECTIONS,
65 MGMT_OP_PIN_CODE_REPLY,
66 MGMT_OP_PIN_CODE_NEG_REPLY,
67 MGMT_OP_SET_IO_CAPABILITY,
68 MGMT_OP_PAIR_DEVICE,
69 MGMT_OP_CANCEL_PAIR_DEVICE,
70 MGMT_OP_UNPAIR_DEVICE,
71 MGMT_OP_USER_CONFIRM_REPLY,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY,
73 MGMT_OP_USER_PASSKEY_REPLY,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY,
75 MGMT_OP_READ_LOCAL_OOB_DATA,
76 MGMT_OP_ADD_REMOTE_OOB_DATA,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
78 MGMT_OP_START_DISCOVERY,
79 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_CONFIRM_NAME,
81 MGMT_OP_BLOCK_DEVICE,
82 MGMT_OP_UNBLOCK_DEVICE,
83 MGMT_OP_SET_DEVICE_ID,
84 MGMT_OP_SET_ADVERTISING,
85 MGMT_OP_SET_BREDR,
86 MGMT_OP_SET_STATIC_ADDRESS,
87 MGMT_OP_SET_SCAN_PARAMS,
88 MGMT_OP_SET_SECURE_CONN,
89 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_SET_PRIVACY,
91 MGMT_OP_LOAD_IRKS,
92 MGMT_OP_GET_CONN_INFO,
93 MGMT_OP_GET_CLOCK_INFO,
94 MGMT_OP_ADD_DEVICE,
95 MGMT_OP_REMOVE_DEVICE,
96 MGMT_OP_LOAD_CONN_PARAM,
97 MGMT_OP_READ_UNCONF_INDEX_LIST,
98 MGMT_OP_READ_CONFIG_INFO,
99 MGMT_OP_SET_EXTERNAL_CONFIG,
100 MGMT_OP_SET_PUBLIC_ADDRESS,
101 MGMT_OP_START_SERVICE_DISCOVERY,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103 MGMT_OP_READ_EXT_INDEX_LIST,
104 MGMT_OP_READ_ADV_FEATURES,
105 MGMT_OP_ADD_ADVERTISING,
106 MGMT_OP_REMOVE_ADVERTISING,
107 MGMT_OP_GET_ADV_SIZE_INFO,
108 MGMT_OP_START_LIMITED_DISCOVERY,
109 MGMT_OP_READ_EXT_INFO,
110 MGMT_OP_SET_APPEARANCE,
111 MGMT_OP_SET_BLOCKED_KEYS,
112 MGMT_OP_SET_WIDEBAND_SPEECH,
113 MGMT_OP_READ_CONTROLLER_CAP,
114 MGMT_OP_READ_EXP_FEATURES_INFO,
115 MGMT_OP_SET_EXP_FEATURE,
116 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
117 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
118 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
119 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
120 MGMT_OP_GET_DEVICE_FLAGS,
121 MGMT_OP_SET_DEVICE_FLAGS,
122 MGMT_OP_READ_ADV_MONITOR_FEATURES,
123 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
124 MGMT_OP_REMOVE_ADV_MONITOR,
125 MGMT_OP_ADD_EXT_ADV_PARAMS,
126 MGMT_OP_ADD_EXT_ADV_DATA,
129 static const u16 mgmt_events[] = {
130 MGMT_EV_CONTROLLER_ERROR,
131 MGMT_EV_INDEX_ADDED,
132 MGMT_EV_INDEX_REMOVED,
133 MGMT_EV_NEW_SETTINGS,
134 MGMT_EV_CLASS_OF_DEV_CHANGED,
135 MGMT_EV_LOCAL_NAME_CHANGED,
136 MGMT_EV_NEW_LINK_KEY,
137 MGMT_EV_NEW_LONG_TERM_KEY,
138 MGMT_EV_DEVICE_CONNECTED,
139 MGMT_EV_DEVICE_DISCONNECTED,
140 MGMT_EV_CONNECT_FAILED,
141 MGMT_EV_PIN_CODE_REQUEST,
142 MGMT_EV_USER_CONFIRM_REQUEST,
143 MGMT_EV_USER_PASSKEY_REQUEST,
144 MGMT_EV_AUTH_FAILED,
145 MGMT_EV_DEVICE_FOUND,
146 MGMT_EV_DISCOVERING,
147 MGMT_EV_DEVICE_BLOCKED,
148 MGMT_EV_DEVICE_UNBLOCKED,
149 MGMT_EV_DEVICE_UNPAIRED,
150 MGMT_EV_PASSKEY_NOTIFY,
151 MGMT_EV_NEW_IRK,
152 MGMT_EV_NEW_CSRK,
153 MGMT_EV_DEVICE_ADDED,
154 MGMT_EV_DEVICE_REMOVED,
155 MGMT_EV_NEW_CONN_PARAM,
156 MGMT_EV_UNCONF_INDEX_ADDED,
157 MGMT_EV_UNCONF_INDEX_REMOVED,
158 MGMT_EV_NEW_CONFIG_OPTIONS,
159 MGMT_EV_EXT_INDEX_ADDED,
160 MGMT_EV_EXT_INDEX_REMOVED,
161 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
162 MGMT_EV_ADVERTISING_ADDED,
163 MGMT_EV_ADVERTISING_REMOVED,
164 MGMT_EV_EXT_INFO_CHANGED,
165 MGMT_EV_PHY_CONFIGURATION_CHANGED,
166 MGMT_EV_EXP_FEATURE_CHANGED,
167 MGMT_EV_DEVICE_FLAGS_CHANGED,
168 MGMT_EV_CONTROLLER_SUSPEND,
169 MGMT_EV_CONTROLLER_RESUME,
172 static const u16 mgmt_untrusted_commands[] = {
173 MGMT_OP_READ_INDEX_LIST,
174 MGMT_OP_READ_INFO,
175 MGMT_OP_READ_UNCONF_INDEX_LIST,
176 MGMT_OP_READ_CONFIG_INFO,
177 MGMT_OP_READ_EXT_INDEX_LIST,
178 MGMT_OP_READ_EXT_INFO,
179 MGMT_OP_READ_CONTROLLER_CAP,
180 MGMT_OP_READ_EXP_FEATURES_INFO,
181 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
182 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
185 static const u16 mgmt_untrusted_events[] = {
186 MGMT_EV_INDEX_ADDED,
187 MGMT_EV_INDEX_REMOVED,
188 MGMT_EV_NEW_SETTINGS,
189 MGMT_EV_CLASS_OF_DEV_CHANGED,
190 MGMT_EV_LOCAL_NAME_CHANGED,
191 MGMT_EV_UNCONF_INDEX_ADDED,
192 MGMT_EV_UNCONF_INDEX_REMOVED,
193 MGMT_EV_NEW_CONFIG_OPTIONS,
194 MGMT_EV_EXT_INDEX_ADDED,
195 MGMT_EV_EXT_INDEX_REMOVED,
196 MGMT_EV_EXT_INFO_CHANGED,
197 MGMT_EV_EXP_FEATURE_CHANGED,
198 MGMT_EV_ADV_MONITOR_ADDED,
199 MGMT_EV_ADV_MONITOR_REMOVED,
/* How long the service cache stays valid before being flushed. */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key used to detect unset/blank link keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
207 /* HCI to MGMT error code conversion table */
208 static const u8 mgmt_status_table[] = {
209 MGMT_STATUS_SUCCESS,
210 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
211 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
212 MGMT_STATUS_FAILED, /* Hardware Failure */
213 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
214 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
215 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
216 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
217 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
218 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
219 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
220 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
221 MGMT_STATUS_BUSY, /* Command Disallowed */
222 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
223 MGMT_STATUS_REJECTED, /* Rejected Security */
224 MGMT_STATUS_REJECTED, /* Rejected Personal */
225 MGMT_STATUS_TIMEOUT, /* Host Timeout */
226 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
227 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
228 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
229 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
230 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
231 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
232 MGMT_STATUS_BUSY, /* Repeated Attempts */
233 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
234 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
235 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
236 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
237 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
238 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
239 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
240 MGMT_STATUS_FAILED, /* Unspecified Error */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
242 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
243 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
244 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
245 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
246 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
247 MGMT_STATUS_FAILED, /* Unit Link Key Used */
248 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
249 MGMT_STATUS_TIMEOUT, /* Instant Passed */
250 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
251 MGMT_STATUS_FAILED, /* Transaction Collision */
252 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
253 MGMT_STATUS_REJECTED, /* QoS Rejected */
254 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
255 MGMT_STATUS_REJECTED, /* Insufficient Security */
256 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
257 MGMT_STATUS_BUSY, /* Role Switch Pending */
258 MGMT_STATUS_FAILED, /* Slot Violation */
259 MGMT_STATUS_FAILED, /* Role Switch Failed */
260 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
261 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
262 MGMT_STATUS_BUSY, /* Host Busy Pairing */
263 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
264 MGMT_STATUS_BUSY, /* Controller Busy */
265 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
266 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
267 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
268 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
269 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
272 static u8 mgmt_status(u8 hci_status)
274 if (hci_status < ARRAY_SIZE(mgmt_status_table))
275 return mgmt_status_table[hci_status];
277 return MGMT_STATUS_FAILED;
280 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
281 u16 len, int flag)
283 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
284 flag, NULL);
287 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
288 u16 len, int flag, struct sock *skip_sk)
290 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
291 flag, skip_sk);
294 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
295 struct sock *skip_sk)
297 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
298 HCI_SOCK_TRUSTED, skip_sk);
301 static u8 le_addr_type(u8 mgmt_addr_type)
303 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
304 return ADDR_LE_DEV_PUBLIC;
305 else
306 return ADDR_LE_DEV_RANDOM;
309 void mgmt_fill_version_info(void *ver)
311 struct mgmt_rp_read_version *rp = ver;
313 rp->version = MGMT_VERSION;
314 rp->revision = cpu_to_le16(MGMT_REVISION);
317 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
318 u16 data_len)
320 struct mgmt_rp_read_version rp;
322 bt_dev_dbg(hdev, "sock %p", sk);
324 mgmt_fill_version_info(&rp);
326 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
327 &rp, sizeof(rp));
330 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
331 u16 data_len)
333 struct mgmt_rp_read_commands *rp;
334 u16 num_commands, num_events;
335 size_t rp_size;
336 int i, err;
338 bt_dev_dbg(hdev, "sock %p", sk);
340 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
341 num_commands = ARRAY_SIZE(mgmt_commands);
342 num_events = ARRAY_SIZE(mgmt_events);
343 } else {
344 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
345 num_events = ARRAY_SIZE(mgmt_untrusted_events);
348 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
350 rp = kmalloc(rp_size, GFP_KERNEL);
351 if (!rp)
352 return -ENOMEM;
354 rp->num_commands = cpu_to_le16(num_commands);
355 rp->num_events = cpu_to_le16(num_events);
357 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
358 __le16 *opcode = rp->opcodes;
360 for (i = 0; i < num_commands; i++, opcode++)
361 put_unaligned_le16(mgmt_commands[i], opcode);
363 for (i = 0; i < num_events; i++, opcode++)
364 put_unaligned_le16(mgmt_events[i], opcode);
365 } else {
366 __le16 *opcode = rp->opcodes;
368 for (i = 0; i < num_commands; i++, opcode++)
369 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
371 for (i = 0; i < num_events; i++, opcode++)
372 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
375 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
376 rp, rp_size);
377 kfree(rp);
379 return err;
382 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
383 u16 data_len)
385 struct mgmt_rp_read_index_list *rp;
386 struct hci_dev *d;
387 size_t rp_len;
388 u16 count;
389 int err;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 read_lock(&hci_dev_list_lock);
395 count = 0;
396 list_for_each_entry(d, &hci_dev_list, list) {
397 if (d->dev_type == HCI_PRIMARY &&
398 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
399 count++;
402 rp_len = sizeof(*rp) + (2 * count);
403 rp = kmalloc(rp_len, GFP_ATOMIC);
404 if (!rp) {
405 read_unlock(&hci_dev_list_lock);
406 return -ENOMEM;
409 count = 0;
410 list_for_each_entry(d, &hci_dev_list, list) {
411 if (hci_dev_test_flag(d, HCI_SETUP) ||
412 hci_dev_test_flag(d, HCI_CONFIG) ||
413 hci_dev_test_flag(d, HCI_USER_CHANNEL))
414 continue;
416 /* Devices marked as raw-only are neither configured
417 * nor unconfigured controllers.
419 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
420 continue;
422 if (d->dev_type == HCI_PRIMARY &&
423 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
424 rp->index[count++] = cpu_to_le16(d->id);
425 bt_dev_dbg(hdev, "Added hci%u", d->id);
429 rp->num_controllers = cpu_to_le16(count);
430 rp_len = sizeof(*rp) + (2 * count);
432 read_unlock(&hci_dev_list_lock);
434 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
435 0, rp, rp_len);
437 kfree(rp);
439 return err;
442 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
443 void *data, u16 data_len)
445 struct mgmt_rp_read_unconf_index_list *rp;
446 struct hci_dev *d;
447 size_t rp_len;
448 u16 count;
449 int err;
451 bt_dev_dbg(hdev, "sock %p", sk);
453 read_lock(&hci_dev_list_lock);
455 count = 0;
456 list_for_each_entry(d, &hci_dev_list, list) {
457 if (d->dev_type == HCI_PRIMARY &&
458 hci_dev_test_flag(d, HCI_UNCONFIGURED))
459 count++;
462 rp_len = sizeof(*rp) + (2 * count);
463 rp = kmalloc(rp_len, GFP_ATOMIC);
464 if (!rp) {
465 read_unlock(&hci_dev_list_lock);
466 return -ENOMEM;
469 count = 0;
470 list_for_each_entry(d, &hci_dev_list, list) {
471 if (hci_dev_test_flag(d, HCI_SETUP) ||
472 hci_dev_test_flag(d, HCI_CONFIG) ||
473 hci_dev_test_flag(d, HCI_USER_CHANNEL))
474 continue;
476 /* Devices marked as raw-only are neither configured
477 * nor unconfigured controllers.
479 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
480 continue;
482 if (d->dev_type == HCI_PRIMARY &&
483 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
484 rp->index[count++] = cpu_to_le16(d->id);
485 bt_dev_dbg(hdev, "Added hci%u", d->id);
489 rp->num_controllers = cpu_to_le16(count);
490 rp_len = sizeof(*rp) + (2 * count);
492 read_unlock(&hci_dev_list_lock);
494 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
495 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
497 kfree(rp);
499 return err;
502 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
503 void *data, u16 data_len)
505 struct mgmt_rp_read_ext_index_list *rp;
506 struct hci_dev *d;
507 u16 count;
508 int err;
510 bt_dev_dbg(hdev, "sock %p", sk);
512 read_lock(&hci_dev_list_lock);
514 count = 0;
515 list_for_each_entry(d, &hci_dev_list, list) {
516 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
517 count++;
520 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
521 if (!rp) {
522 read_unlock(&hci_dev_list_lock);
523 return -ENOMEM;
526 count = 0;
527 list_for_each_entry(d, &hci_dev_list, list) {
528 if (hci_dev_test_flag(d, HCI_SETUP) ||
529 hci_dev_test_flag(d, HCI_CONFIG) ||
530 hci_dev_test_flag(d, HCI_USER_CHANNEL))
531 continue;
533 /* Devices marked as raw-only are neither configured
534 * nor unconfigured controllers.
536 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
537 continue;
539 if (d->dev_type == HCI_PRIMARY) {
540 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
541 rp->entry[count].type = 0x01;
542 else
543 rp->entry[count].type = 0x00;
544 } else if (d->dev_type == HCI_AMP) {
545 rp->entry[count].type = 0x02;
546 } else {
547 continue;
550 rp->entry[count].bus = d->bus;
551 rp->entry[count++].index = cpu_to_le16(d->id);
552 bt_dev_dbg(hdev, "Added hci%u", d->id);
555 rp->num_controllers = cpu_to_le16(count);
557 read_unlock(&hci_dev_list_lock);
559 /* If this command is called at least once, then all the
560 * default index and unconfigured index events are disabled
561 * and from now on only extended index events are used.
563 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
564 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
565 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
567 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
568 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
569 struct_size(rp, entry, count));
571 kfree(rp);
573 return err;
576 static bool is_configured(struct hci_dev *hdev)
578 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
579 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
580 return false;
582 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
583 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
584 !bacmp(&hdev->public_addr, BDADDR_ANY))
585 return false;
587 return true;
590 static __le32 get_missing_options(struct hci_dev *hdev)
592 u32 options = 0;
594 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
595 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
596 options |= MGMT_OPTION_EXTERNAL_CONFIG;
598 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
599 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
600 !bacmp(&hdev->public_addr, BDADDR_ANY))
601 options |= MGMT_OPTION_PUBLIC_ADDRESS;
603 return cpu_to_le32(options);
606 static int new_options(struct hci_dev *hdev, struct sock *skip)
608 __le32 options = get_missing_options(hdev);
610 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
611 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
614 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
616 __le32 options = get_missing_options(hdev);
618 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
619 sizeof(options));
622 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
623 void *data, u16 data_len)
625 struct mgmt_rp_read_config_info rp;
626 u32 options = 0;
628 bt_dev_dbg(hdev, "sock %p", sk);
630 hci_dev_lock(hdev);
632 memset(&rp, 0, sizeof(rp));
633 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
635 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
636 options |= MGMT_OPTION_EXTERNAL_CONFIG;
638 if (hdev->set_bdaddr)
639 options |= MGMT_OPTION_PUBLIC_ADDRESS;
641 rp.supported_options = cpu_to_le32(options);
642 rp.missing_options = get_missing_options(hdev);
644 hci_dev_unlock(hdev);
646 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
647 &rp, sizeof(rp));
650 static u32 get_supported_phys(struct hci_dev *hdev)
652 u32 supported_phys = 0;
654 if (lmp_bredr_capable(hdev)) {
655 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
657 if (hdev->features[0][0] & LMP_3SLOT)
658 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
660 if (hdev->features[0][0] & LMP_5SLOT)
661 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
663 if (lmp_edr_2m_capable(hdev)) {
664 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
666 if (lmp_edr_3slot_capable(hdev))
667 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
669 if (lmp_edr_5slot_capable(hdev))
670 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
672 if (lmp_edr_3m_capable(hdev)) {
673 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
675 if (lmp_edr_3slot_capable(hdev))
676 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
678 if (lmp_edr_5slot_capable(hdev))
679 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
684 if (lmp_le_capable(hdev)) {
685 supported_phys |= MGMT_PHY_LE_1M_TX;
686 supported_phys |= MGMT_PHY_LE_1M_RX;
688 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
689 supported_phys |= MGMT_PHY_LE_2M_TX;
690 supported_phys |= MGMT_PHY_LE_2M_RX;
693 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
694 supported_phys |= MGMT_PHY_LE_CODED_TX;
695 supported_phys |= MGMT_PHY_LE_CODED_RX;
699 return supported_phys;
702 static u32 get_selected_phys(struct hci_dev *hdev)
704 u32 selected_phys = 0;
706 if (lmp_bredr_capable(hdev)) {
707 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
709 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
710 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
712 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
713 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
715 if (lmp_edr_2m_capable(hdev)) {
716 if (!(hdev->pkt_type & HCI_2DH1))
717 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev) &&
720 !(hdev->pkt_type & HCI_2DH3))
721 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
723 if (lmp_edr_5slot_capable(hdev) &&
724 !(hdev->pkt_type & HCI_2DH5))
725 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
727 if (lmp_edr_3m_capable(hdev)) {
728 if (!(hdev->pkt_type & HCI_3DH1))
729 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
731 if (lmp_edr_3slot_capable(hdev) &&
732 !(hdev->pkt_type & HCI_3DH3))
733 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
735 if (lmp_edr_5slot_capable(hdev) &&
736 !(hdev->pkt_type & HCI_3DH5))
737 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
742 if (lmp_le_capable(hdev)) {
743 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
744 selected_phys |= MGMT_PHY_LE_1M_TX;
746 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
747 selected_phys |= MGMT_PHY_LE_1M_RX;
749 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
750 selected_phys |= MGMT_PHY_LE_2M_TX;
752 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
753 selected_phys |= MGMT_PHY_LE_2M_RX;
755 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
756 selected_phys |= MGMT_PHY_LE_CODED_TX;
758 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
759 selected_phys |= MGMT_PHY_LE_CODED_RX;
762 return selected_phys;
765 static u32 get_configurable_phys(struct hci_dev *hdev)
767 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
768 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
771 static u32 get_supported_settings(struct hci_dev *hdev)
773 u32 settings = 0;
775 settings |= MGMT_SETTING_POWERED;
776 settings |= MGMT_SETTING_BONDABLE;
777 settings |= MGMT_SETTING_DEBUG_KEYS;
778 settings |= MGMT_SETTING_CONNECTABLE;
779 settings |= MGMT_SETTING_DISCOVERABLE;
781 if (lmp_bredr_capable(hdev)) {
782 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
783 settings |= MGMT_SETTING_FAST_CONNECTABLE;
784 settings |= MGMT_SETTING_BREDR;
785 settings |= MGMT_SETTING_LINK_SECURITY;
787 if (lmp_ssp_capable(hdev)) {
788 settings |= MGMT_SETTING_SSP;
789 if (IS_ENABLED(CONFIG_BT_HS))
790 settings |= MGMT_SETTING_HS;
793 if (lmp_sc_capable(hdev))
794 settings |= MGMT_SETTING_SECURE_CONN;
796 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
797 &hdev->quirks))
798 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
801 if (lmp_le_capable(hdev)) {
802 settings |= MGMT_SETTING_LE;
803 settings |= MGMT_SETTING_SECURE_CONN;
804 settings |= MGMT_SETTING_PRIVACY;
805 settings |= MGMT_SETTING_STATIC_ADDRESS;
807 /* When the experimental feature for LL Privacy support is
808 * enabled, then advertising is no longer supported.
810 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
811 settings |= MGMT_SETTING_ADVERTISING;
814 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
815 hdev->set_bdaddr)
816 settings |= MGMT_SETTING_CONFIGURATION;
818 settings |= MGMT_SETTING_PHY_CONFIGURATION;
820 return settings;
823 static u32 get_current_settings(struct hci_dev *hdev)
825 u32 settings = 0;
827 if (hdev_is_powered(hdev))
828 settings |= MGMT_SETTING_POWERED;
830 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
831 settings |= MGMT_SETTING_CONNECTABLE;
833 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
834 settings |= MGMT_SETTING_FAST_CONNECTABLE;
836 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
837 settings |= MGMT_SETTING_DISCOVERABLE;
839 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
840 settings |= MGMT_SETTING_BONDABLE;
842 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
843 settings |= MGMT_SETTING_BREDR;
845 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
846 settings |= MGMT_SETTING_LE;
848 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
849 settings |= MGMT_SETTING_LINK_SECURITY;
851 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
852 settings |= MGMT_SETTING_SSP;
854 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
855 settings |= MGMT_SETTING_HS;
857 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
858 settings |= MGMT_SETTING_ADVERTISING;
860 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
861 settings |= MGMT_SETTING_SECURE_CONN;
863 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
864 settings |= MGMT_SETTING_DEBUG_KEYS;
866 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
867 settings |= MGMT_SETTING_PRIVACY;
869 /* The current setting for static address has two purposes. The
870 * first is to indicate if the static address will be used and
871 * the second is to indicate if it is actually set.
873 * This means if the static address is not configured, this flag
874 * will never be set. If the address is configured, then if the
875 * address is actually used decides if the flag is set or not.
877 * For single mode LE only controllers and dual-mode controllers
878 * with BR/EDR disabled, the existence of the static address will
879 * be evaluated.
881 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
882 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
883 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
884 if (bacmp(&hdev->static_addr, BDADDR_ANY))
885 settings |= MGMT_SETTING_STATIC_ADDRESS;
888 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
889 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
891 return settings;
894 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
896 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
899 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
900 struct hci_dev *hdev,
901 const void *data)
903 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
906 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
908 struct mgmt_pending_cmd *cmd;
910 /* If there's a pending mgmt command the flags will not yet have
911 * their final values, so check for this first.
913 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
914 if (cmd) {
915 struct mgmt_mode *cp = cmd->param;
916 if (cp->val == 0x01)
917 return LE_AD_GENERAL;
918 else if (cp->val == 0x02)
919 return LE_AD_LIMITED;
920 } else {
921 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
922 return LE_AD_LIMITED;
923 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
924 return LE_AD_GENERAL;
927 return 0;
930 bool mgmt_get_connectable(struct hci_dev *hdev)
932 struct mgmt_pending_cmd *cmd;
934 /* If there's a pending mgmt command the flag will not yet have
935 * it's final value, so check for this first.
937 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
938 if (cmd) {
939 struct mgmt_mode *cp = cmd->param;
941 return cp->val;
944 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
947 static void service_cache_off(struct work_struct *work)
949 struct hci_dev *hdev = container_of(work, struct hci_dev,
950 service_cache.work);
951 struct hci_request req;
953 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
954 return;
956 hci_req_init(&req, hdev);
958 hci_dev_lock(hdev);
960 __hci_req_update_eir(&req);
961 __hci_req_update_class(&req);
963 hci_dev_unlock(hdev);
965 hci_req_run(&req, NULL);
968 static void rpa_expired(struct work_struct *work)
970 struct hci_dev *hdev = container_of(work, struct hci_dev,
971 rpa_expired.work);
972 struct hci_request req;
974 bt_dev_dbg(hdev, "");
976 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
978 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
979 return;
981 /* The generation of a new RPA and programming it into the
982 * controller happens in the hci_req_enable_advertising()
983 * function.
985 hci_req_init(&req, hdev);
986 if (ext_adv_capable(hdev))
987 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
988 else
989 __hci_req_enable_advertising(&req);
990 hci_req_run(&req, NULL);
993 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
995 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
996 return;
998 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
999 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1001 /* Non-mgmt controlled devices get this bit set
1002 * implicitly so that pairing works for them, however
1003 * for mgmt we require user-space to explicitly enable
1004 * it
1006 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1009 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1010 void *data, u16 data_len)
1012 struct mgmt_rp_read_info rp;
1014 bt_dev_dbg(hdev, "sock %p", sk);
1016 hci_dev_lock(hdev);
1018 memset(&rp, 0, sizeof(rp));
1020 bacpy(&rp.bdaddr, &hdev->bdaddr);
1022 rp.version = hdev->hci_ver;
1023 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1025 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1026 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1028 memcpy(rp.dev_class, hdev->dev_class, 3);
1030 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1031 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1033 hci_dev_unlock(hdev);
1035 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1036 sizeof(rp));
1039 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1041 u16 eir_len = 0;
1042 size_t name_len;
1044 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1045 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1046 hdev->dev_class, 3);
1048 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1049 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1050 hdev->appearance);
1052 name_len = strlen(hdev->dev_name);
1053 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1054 hdev->dev_name, name_len);
1056 name_len = strlen(hdev->short_name);
1057 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1058 hdev->short_name, name_len);
1060 return eir_len;
1063 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1064 void *data, u16 data_len)
1066 char buf[512];
1067 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1068 u16 eir_len;
1070 bt_dev_dbg(hdev, "sock %p", sk);
1072 memset(&buf, 0, sizeof(buf));
1074 hci_dev_lock(hdev);
1076 bacpy(&rp->bdaddr, &hdev->bdaddr);
1078 rp->version = hdev->hci_ver;
1079 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1081 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1082 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1085 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1086 rp->eir_len = cpu_to_le16(eir_len);
1088 hci_dev_unlock(hdev);
1090 /* If this command is called at least once, then the events
1091 * for class of device and local name changes are disabled
1092 * and only the new extended controller information event
1093 * is used.
1095 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1096 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1097 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1099 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1100 sizeof(*rp) + eir_len);
1103 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1105 char buf[512];
1106 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1107 u16 eir_len;
1109 memset(buf, 0, sizeof(buf));
1111 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1112 ev->eir_len = cpu_to_le16(eir_len);
1114 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1115 sizeof(*ev) + eir_len,
1116 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Reply to a settings-changing mgmt command with the current settings
 * bitmask (little-endian) as the command-complete payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the power-off work immediately instead of waiting for
 * the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
/* Notify mgmt listeners (except the originating socket @sk) that an
 * advertising instance was added.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
/* Notify mgmt listeners (except the originating socket @sk) that an
 * advertising instance was removed.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1156 static void cancel_adv_timeout(struct hci_dev *hdev)
1158 if (hdev->adv_instance_timeout) {
1159 hdev->adv_instance_timeout = 0;
1160 cancel_delayed_work(&hdev->adv_instance_expire);
/* Build and run one HCI request that quiesces the controller before
 * power off: disable page/inquiry scan, drop advertising instances,
 * stop discovery, and abort every active connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to
 * be sent (caller treats that as "already clean").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
/* Handle MGMT_OP_SET_POWERED: power the controller on or off.
 * Power-on is queued to the request workqueue; power-off first
 * quiesces HCI state (clean_up_hci_state) and then schedules the
 * delayed power_off work. The pending command is completed later
 * from the power state machine.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Broadcast the current settings bitmask to all mgmt sockets that
 * receive setting events, except @skip (usually the command issuer,
 * which gets the value in its command-complete instead).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
/* Public wrapper: broadcast new settings to every listener (no skip). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
/* Context passed through mgmt_pending_foreach() callbacks: collects
 * the first responder socket (held with sock_hold) so the caller can
 * later skip it when broadcasting, plus the device and a status code.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket answered; NULL until set */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
/* mgmt_pending_foreach() callback: answer a pending settings command,
 * remember the first socket in the cmd_lookup context (taking a ref),
 * and free the pending command entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* ref dropped by the caller via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1296 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1298 if (cmd->cmd_complete) {
1299 u8 *status = data;
1301 cmd->cmd_complete(cmd, *status);
1302 mgmt_pending_remove(cmd);
1304 return;
1307 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo back the original command
 * parameters as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
/* cmd_complete handler for address-based commands: the response is the
 * leading mgmt_addr_info of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1322 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1324 if (!lmp_bredr_capable(hdev))
1325 return MGMT_STATUS_NOT_SUPPORTED;
1326 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1327 return MGMT_STATUS_REJECTED;
1328 else
1329 return MGMT_STATUS_SUCCESS;
1332 static u8 mgmt_le_support(struct hci_dev *hdev)
1334 if (!lmp_le_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1338 else
1339 return MGMT_STATUS_SUCCESS;
/* Completion of a SET_DISCOVERABLE request: on error, fail the pending
 * command and drop the limited-discoverable flag; on success, arm the
 * discoverable timeout (if any), answer the command and broadcast the
 * new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the discoverable-off timer now that the mode change stuck */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_DISCOVERABLE (val: 0x00 off, 0x01 general,
 * 0x02 limited; timeout in seconds). Validates parameter/timeout
 * combinations, handles the powered-off case purely via flags, and
 * otherwise stores the new flags/timeout and queues the discoverable
 * update work; the actual HCI traffic completes asynchronously in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable mode to be active */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Advertising is temporarily paused (e.g. during suspend) */
	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion of a SET_CONNECTABLE request: fail the pending command on
 * error, otherwise answer it and broadcast the new settings.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Flag-only path for SET_CONNECTABLE (used when powered off): update
 * HCI_CONNECTABLE (clearing HCI_DISCOVERABLE when disabling), answer
 * the caller, and on a real change refresh scan state and broadcast
 * the new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Not connectable implies not discoverable either */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
/* Handle MGMT_OP_SET_CONNECTABLE: when powered off, just flip flags via
 * set_connectable_update_settings(); otherwise record the new flags and
 * queue the connectable update work, completing asynchronously in
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag. Purely a
 * host-side setting, so no HCI traffic is needed — except that in
 * limited-privacy mode a bondable change can affect the advertising
 * address, in which case the discoverable update work is queued.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link-level
 * security (authentication). Powered off: flag-only. Powered on: send
 * HCI_OP_WRITE_AUTH_ENABLE and complete the pending command from the
 * command-complete handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * Powered off: flag-only (disabling SSP also clears High Speed).
 * Powered on: send HCI_OP_WRITE_SSP_MODE, first dropping SSP debug
 * mode when SSP is being turned off.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also forces High Speed off */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode (best effort) */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_HS: enable/disable High Speed (AMP) support.
 * Host-side flag only; requires CONFIG_BT_HS, BR/EDR, SSP capability
 * and SSP enabled. Disabling while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An SSP change in flight could invalidate the SSP check above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion of a SET_LE request: answer all pending SET_LE commands
 * (status or settings), broadcast new settings, and when LE ended up
 * enabled, refresh the default advertising/scan-response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_LE: enable/disable Low Energy support. LE-only
 * controllers may not switch LE off. When powered (and the host LE
 * state actually changes), sends HCI_OP_WRITE_LE_HOST_SUPPORTED —
 * disabling also tears down advertising first; completion is handled
 * in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before the host LE support goes away */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2029 /* This is a helper function to test for pending mgmt commands that can
2030 * cause CoD or EIR HCI commands. We can only allow one such pending
2031 * mgmt command at a time since otherwise we cannot easily track what
2032 * the current values are, will be, and based on that calculate if a new
2033 * HCI command needs to be sent and if yes with what value.
2035 static bool pending_eir_or_class(struct hci_dev *hdev)
2037 struct mgmt_pending_cmd *cmd;
2039 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2040 switch (cmd->opcode) {
2041 case MGMT_OP_ADD_UUID:
2042 case MGMT_OP_REMOVE_UUID:
2043 case MGMT_OP_SET_DEV_CLASS:
2044 case MGMT_OP_SET_POWERED:
2045 return true;
2049 return false;
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs occupy bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2057 static u8 get_uuid_size(const u8 *uuid)
2059 u32 val;
2061 if (memcmp(uuid, bluetooth_base_uuid, 12))
2062 return 128;
2064 val = get_unaligned_le32(&uuid[12]);
2065 if (val > 0xffff)
2066 return 32;
2068 return 16;
/* Shared completion for class/EIR-affecting commands (ADD_UUID,
 * REMOVE_UUID, SET_DEV_CLASS): complete the pending command for
 * @mgmt_op with the current device class as payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* HCI request callback for add_uuid(): finish the pending ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
/* Handle MGMT_OP_ADD_UUID: register a service UUID, then refresh the
 * class of device and EIR data via one HCI request. If no HCI traffic
 * is needed (-ENODATA), complete immediately with the current class.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing queued: the class/EIR are already up to date */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2155 static bool enable_service_cache(struct hci_dev *hdev)
2157 if (!hdev_is_powered(hdev))
2158 return false;
2160 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2161 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2162 CACHE_TIMEOUT);
2163 return true;
2166 return false;
/* HCI request callback for remove_uuid(): finish the pending REMOVE_UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
/* Handle MGMT_OP_REMOVE_UUID: remove one UUID (or all, when the
 * all-zero wildcard UUID is given), then refresh class of device and
 * EIR. The wildcard path may defer the update via the service cache.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: clear every registered UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing queued: respond with the current class */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request callback for set_dev_class(): finish the pending command. */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
/* Handle MGMT_OP_SET_DEV_CLASS: store the major/minor class of device
 * (minor's two low bits and major's three high bits must be zero) and
 * push the update to the controller when powered. A pending service
 * cache is flushed synchronously first — note the lock is dropped
 * around cancel_delayed_work_sync() to avoid deadlock with the work.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing queued: class already matched, reply directly */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link keys
 * with the supplied list. Validates the count against the payload
 * length (and U16_MAX overflow), updates the keep-debug-keys flag,
 * and skips blocked or debug-combination keys while loading.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all entries before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
/* Broadcast an MGMT Device Unpaired event for @bdaddr/@addr_type to all
 * mgmt sockets except @skip_sk (typically the socket that requested the
 * unpair, which already got a command reply).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
/* MGMT_OP_UNPAIR_DEVICE handler: remove all bonding data for a device
 * and optionally disconnect it.
 *
 * BR/EDR: deletes the stored link key.  LE: aborts any ongoing SMP
 * pairing and removes LTK/IRK.  When cp->disconnect is set and a
 * connection exists, the link is aborted and the command completes
 * later from the disconnect path via the pending command.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the parameters can be dropped right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_DISCONNECT handler: terminate an existing BR/EDR or LE
 * connection.  Only one disconnect may be pending per controller; the
 * reply is deferred to the HCI disconnect completion via the pending
 * command (generic_cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections exist but carry no link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2628 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2630 switch (link_type) {
2631 case LE_LINK:
2632 switch (addr_type) {
2633 case ADDR_LE_DEV_PUBLIC:
2634 return BDADDR_LE_PUBLIC;
2636 default:
2637 /* Fallback to LE Random address type */
2638 return BDADDR_LE_RANDOM;
2641 default:
2642 /* Fallback to BR/EDR type */
2643 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report all mgmt-visible connections.
 *
 * The reply is sized for every HCI_CONN_MGMT_CONNECTED entry, but
 * SCO/eSCO links are filtered out in the fill loop: their slot is
 * written and then reused (index not incremented), and the final reply
 * length is recalculated from the number of entries actually kept.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count entries to size the allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill entries, skipping SCO/eSCO links */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Queue an HCI PIN Code Negative Reply and track it as a pending mgmt
 * command; the socket reply is sent from the HCI command completion.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller.  If the link requires a 16-byte PIN (BT_SECURITY_HIGH)
 * and a shorter one was given, a negative reply is sent instead and the
 * command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2784 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2785 u16 len)
2787 struct mgmt_cp_set_io_capability *cp = data;
2789 bt_dev_dbg(hdev, "sock %p", sk);
2791 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2793 MGMT_STATUS_INVALID_PARAMS);
2795 hci_dev_lock(hdev);
2797 hdev->io_capability = cp->io_capability;
2799 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2801 hci_dev_unlock(hdev);
2803 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2804 NULL, 0);
2807 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2809 struct hci_dev *hdev = conn->hdev;
2810 struct mgmt_pending_cmd *cmd;
2812 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2813 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2814 continue;
2816 if (cmd->user_data != conn)
2817 continue;
2819 return cmd;
2822 return NULL;
/* Finish a PAIR_DEVICE command: send the reply, detach the pairing
 * callbacks and release the references taken when the command was
 * submitted (hci_conn_drop pairs with the connect, hci_conn_put with
 * the hci_conn_get stored in cmd->user_data).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2854 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2856 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2857 struct mgmt_pending_cmd *cmd;
2859 cmd = find_pairing(conn);
2860 if (cmd) {
2861 cmd->cmd_complete(cmd, status);
2862 mgmt_pending_remove(cmd);
/* Connection/security/disconnect callback for BR/EDR pairings: any
 * event here finishes the pending PAIR_DEVICE command with the mapped
 * HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Callback for LE pairings: only failures are reported here; success
 * is delivered by mgmt_smp_complete() once SMP actually finishes, since
 * an LE connection alone does not prove the pairing completed.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 *
 * Creates (or reuses) a BR/EDR ACL or LE connection, attaches pairing
 * callbacks, and tracks the operation as a pending command holding a
 * reference to the connection.  Completion happens from the callbacks
 * (pairing_complete) unless the link is already secure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A callback already installed means another pairing is active */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, remove any partial pairing state and
 * tear down the link if it was created solely for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing currently in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Common helper for all user pairing responses (PIN / confirm /
 * passkey, positive and negative).  LE responses are routed through
 * SMP and answered immediately; BR/EDR responses are forwarded to the
 * controller via @hci_op and complete from the HCI event.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP and are answered synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * (numeric comparison) request.  The length check is needed here
 * because this opcode is registered with a variable-length parameter.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey entered by
 * the user.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
/* Re-schedule advertising after data carried by the current instance
 * (identified by @flags, e.g. local name or appearance) has changed.
 * Cancels the instance timeout and moves on to the next instance so
 * the stale data gets refreshed when this one is scheduled again.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
/* HCI request completion for MGMT_OP_SET_LOCAL_NAME: report the result
 * to the requesting socket and, on success while advertising, expire
 * any advertising instance that carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_SET_LOCAL_NAME handler: update the controller's friendly
 * name and short name.  If the controller is powered, the change is
 * propagated to EIR/name (BR/EDR) and scan response data (LE) via an
 * HCI request, completing asynchronously in set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the name and notify; no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value; if it
 * changed while advertising, expire any instance carrying it so the
 * advertising data gets refreshed.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected
 * and configurable PHYs.  The reply struct is memset so that any
 * padding copied to userspace is zeroed.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	/* memset zeroes padding so no stack data leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
/* HCI request completion for the LE Set Default PHY part of
 * MGMT_OP_SET_PHY_CONFIGURATION: report the result to the requester
 * and broadcast the change event on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * BR/EDR PHY selections are applied synchronously by translating them
 * into the ACL packet-type mask (hdev->pkt_type); note the EDR bits in
 * pkt_type are "disable" bits, hence the inverted logic.  LE PHY
 * selections require an HCI LE Set Default PHY command and complete
 * asynchronously in set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Mandatory (non-configurable) PHYs must remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* BR 1M multi-slot bits are "enable" bits... */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* ...while the EDR bits are "disable" bits, so clear to enable */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part is unchanged, no HCI command is needed and the
	 * operation can complete here.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" per direction */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3608 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3609 u16 len)
3611 int err = MGMT_STATUS_SUCCESS;
3612 struct mgmt_cp_set_blocked_keys *keys = data;
3613 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3614 sizeof(struct mgmt_blocked_key_info));
3615 u16 key_count, expected_len;
3616 int i;
3618 bt_dev_dbg(hdev, "sock %p", sk);
3620 key_count = __le16_to_cpu(keys->key_count);
3621 if (key_count > max_key_count) {
3622 bt_dev_err(hdev, "too big key_count value %u", key_count);
3623 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3624 MGMT_STATUS_INVALID_PARAMS);
3627 expected_len = struct_size(keys, keys, key_count);
3628 if (expected_len != len) {
3629 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3630 expected_len, len);
3631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3632 MGMT_STATUS_INVALID_PARAMS);
3635 hci_dev_lock(hdev);
3637 hci_blocked_keys_clear(hdev);
3639 for (i = 0; i < keys->key_count; ++i) {
3640 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3642 if (!b) {
3643 err = MGMT_STATUS_NO_RESOURCES;
3644 break;
3647 b->type = keys->keys[i].type;
3648 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3649 list_add_rcu(&b->list, &hdev->blocked_keys);
3651 hci_dev_unlock(hdev);
3653 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3654 err, NULL, 0);
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband-speech
 * setting.  Only allowed when the controller quirk advertises support,
 * and the value can only change while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the value while powered would desync the controller */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3713 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3714 void *data, u16 data_len)
3716 char buf[20];
3717 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3718 u16 cap_len = 0;
3719 u8 flags = 0;
3720 u8 tx_power_range[2];
3722 bt_dev_dbg(hdev, "sock %p", sk);
3724 memset(&buf, 0, sizeof(buf));
3726 hci_dev_lock(hdev);
3728 /* When the Read Simple Pairing Options command is supported, then
3729 * the remote public key validation is supported.
3731 if (hdev->commands[41] & 0x08)
3732 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3734 flags |= 0x02; /* Remote public key validation (LE) */
3736 /* When the Read Encryption Key Size command is supported, then the
3737 * encryption key size is enforced.
3739 if (hdev->commands[20] & 0x10)
3740 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3742 flags |= 0x08; /* Encryption key size enforcement (LE) */
3744 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3745 &flags, 1);
3747 /* When the Read Simple Pairing Options command is supported, then
3748 * also max encryption key size information is provided.
3750 if (hdev->commands[41] & 0x08)
3751 cap_len = eir_append_le16(rp->cap, cap_len,
3752 MGMT_CAP_MAX_ENC_KEY_SIZE,
3753 hdev->max_enc_key_size);
3755 cap_len = eir_append_le16(rp->cap, cap_len,
3756 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3757 SMP_MAX_ENC_KEY_SIZE);
3759 /* Append the min/max LE tx power parameters if we were able to fetch
3760 * it from the controller
3762 if (hdev->commands[38] & 0x80) {
3763 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3764 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3765 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3766 tx_power_range, 2);
3769 rp->cap_len = cpu_to_le16(cap_len);
3771 hci_dev_unlock(hdev);
3773 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3774 rp, sizeof(*rp) + cap_len);
3777 #ifdef CONFIG_BT_FEATURE_DEBUG
3778 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3779 static const u8 debug_uuid[16] = {
3780 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3781 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3783 #endif
3785 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3786 static const u8 simult_central_periph_uuid[16] = {
3787 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3788 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3791 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3792 static const u8 rpa_resolution_uuid[16] = {
3793 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3794 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3797 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3798 void *data, u16 data_len)
3800 char buf[62]; /* Enough space for 3 features */
3801 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3802 u16 idx = 0;
3803 u32 flags;
3805 bt_dev_dbg(hdev, "sock %p", sk);
3807 memset(&buf, 0, sizeof(buf));
3809 #ifdef CONFIG_BT_FEATURE_DEBUG
3810 if (!hdev) {
3811 flags = bt_dbg_get() ? BIT(0) : 0;
3813 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3814 rp->features[idx].flags = cpu_to_le32(flags);
3815 idx++;
3817 #endif
3819 if (hdev) {
3820 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3821 (hdev->le_states[4] & 0x08) && /* Central */
3822 (hdev->le_states[4] & 0x40) && /* Peripheral */
3823 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3824 flags = BIT(0);
3825 else
3826 flags = 0;
3828 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3829 rp->features[idx].flags = cpu_to_le32(flags);
3830 idx++;
3833 if (hdev && use_ll_privacy(hdev)) {
3834 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3835 flags = BIT(0) | BIT(1);
3836 else
3837 flags = BIT(1);
3839 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3840 rp->features[idx].flags = cpu_to_le32(flags);
3841 idx++;
3844 rp->feature_count = cpu_to_le16(idx);
3846 /* After reading the experimental features information, enable
3847 * the events to update client on any future change.
3849 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3851 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3852 MGMT_OP_READ_EXP_FEATURES_INFO,
3853 0, rp, sizeof(*rp) + (20 * idx));
3856 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3857 struct sock *skip)
3859 struct mgmt_ev_exp_feature_changed ev;
3861 memset(&ev, 0, sizeof(ev));
3862 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3863 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3865 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3866 &ev, sizeof(ev),
3867 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Broadcast an Experimental Feature Changed event for the global debug
 * feature (non-controller index, hence hdev == NULL), skipping "skip".
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3886 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3887 void *data, u16 data_len)
3889 struct mgmt_cp_set_exp_feature *cp = data;
3890 struct mgmt_rp_set_exp_feature rp;
3892 bt_dev_dbg(hdev, "sock %p", sk);
3894 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3895 memset(rp.uuid, 0, 16);
3896 rp.flags = cpu_to_le32(0);
3898 #ifdef CONFIG_BT_FEATURE_DEBUG
3899 if (!hdev) {
3900 bool changed = bt_dbg_get();
3902 bt_dbg_set(false);
3904 if (changed)
3905 exp_debug_feature_changed(false, sk);
3907 #endif
3909 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3910 bool changed = hci_dev_test_flag(hdev,
3911 HCI_ENABLE_LL_PRIVACY);
3913 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3915 if (changed)
3916 exp_ll_privacy_feature_changed(false, hdev, sk);
3919 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3921 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3922 MGMT_OP_SET_EXP_FEATURE, 0,
3923 &rp, sizeof(rp));
3926 #ifdef CONFIG_BT_FEATURE_DEBUG
3927 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3928 bool val, changed;
3929 int err;
3931 /* Command requires to use the non-controller index */
3932 if (hdev)
3933 return mgmt_cmd_status(sk, hdev->id,
3934 MGMT_OP_SET_EXP_FEATURE,
3935 MGMT_STATUS_INVALID_INDEX);
3937 /* Parameters are limited to a single octet */
3938 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3939 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3940 MGMT_OP_SET_EXP_FEATURE,
3941 MGMT_STATUS_INVALID_PARAMS);
3943 /* Only boolean on/off is supported */
3944 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3945 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3946 MGMT_OP_SET_EXP_FEATURE,
3947 MGMT_STATUS_INVALID_PARAMS);
3949 val = !!cp->param[0];
3950 changed = val ? !bt_dbg_get() : bt_dbg_get();
3951 bt_dbg_set(val);
3953 memcpy(rp.uuid, debug_uuid, 16);
3954 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3956 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3958 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3959 MGMT_OP_SET_EXP_FEATURE, 0,
3960 &rp, sizeof(rp));
3962 if (changed)
3963 exp_debug_feature_changed(val, sk);
3965 return err;
3967 #endif
3969 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3970 bool val, changed;
3971 int err;
3972 u32 flags;
3974 /* Command requires to use the controller index */
3975 if (!hdev)
3976 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3977 MGMT_OP_SET_EXP_FEATURE,
3978 MGMT_STATUS_INVALID_INDEX);
3980 /* Changes can only be made when controller is powered down */
3981 if (hdev_is_powered(hdev))
3982 return mgmt_cmd_status(sk, hdev->id,
3983 MGMT_OP_SET_EXP_FEATURE,
3984 MGMT_STATUS_NOT_POWERED);
3986 /* Parameters are limited to a single octet */
3987 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3988 return mgmt_cmd_status(sk, hdev->id,
3989 MGMT_OP_SET_EXP_FEATURE,
3990 MGMT_STATUS_INVALID_PARAMS);
3992 /* Only boolean on/off is supported */
3993 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3994 return mgmt_cmd_status(sk, hdev->id,
3995 MGMT_OP_SET_EXP_FEATURE,
3996 MGMT_STATUS_INVALID_PARAMS);
3998 val = !!cp->param[0];
4000 if (val) {
4001 changed = !hci_dev_test_flag(hdev,
4002 HCI_ENABLE_LL_PRIVACY);
4003 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4004 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4006 /* Enable LL privacy + supported settings changed */
4007 flags = BIT(0) | BIT(1);
4008 } else {
4009 changed = hci_dev_test_flag(hdev,
4010 HCI_ENABLE_LL_PRIVACY);
4011 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4013 /* Disable LL privacy + supported settings changed */
4014 flags = BIT(1);
4017 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4018 rp.flags = cpu_to_le32(flags);
4020 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4022 err = mgmt_cmd_complete(sk, hdev->id,
4023 MGMT_OP_SET_EXP_FEATURE, 0,
4024 &rp, sizeof(rp));
4026 if (changed)
4027 exp_ll_privacy_feature_changed(val, hdev, sk);
4029 return err;
4032 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4033 MGMT_OP_SET_EXP_FEATURE,
4034 MGMT_STATUS_NOT_SUPPORTED);
4037 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4039 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4040 u16 data_len)
4042 struct mgmt_cp_get_device_flags *cp = data;
4043 struct mgmt_rp_get_device_flags rp;
4044 struct bdaddr_list_with_flags *br_params;
4045 struct hci_conn_params *params;
4046 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4047 u32 current_flags = 0;
4048 u8 status = MGMT_STATUS_INVALID_PARAMS;
4050 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4051 &cp->addr.bdaddr, cp->addr.type);
4053 hci_dev_lock(hdev);
4055 if (cp->addr.type == BDADDR_BREDR) {
4056 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4057 &cp->addr.bdaddr,
4058 cp->addr.type);
4059 if (!br_params)
4060 goto done;
4062 current_flags = br_params->current_flags;
4063 } else {
4064 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4065 le_addr_type(cp->addr.type));
4067 if (!params)
4068 goto done;
4070 current_flags = params->current_flags;
4073 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4074 rp.addr.type = cp->addr.type;
4075 rp.supported_flags = cpu_to_le32(supported_flags);
4076 rp.current_flags = cpu_to_le32(current_flags);
4078 status = MGMT_STATUS_SUCCESS;
4080 done:
4081 hci_dev_unlock(hdev);
4083 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4084 &rp, sizeof(rp));
4087 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4088 bdaddr_t *bdaddr, u8 bdaddr_type,
4089 u32 supported_flags, u32 current_flags)
4091 struct mgmt_ev_device_flags_changed ev;
4093 bacpy(&ev.addr.bdaddr, bdaddr);
4094 ev.addr.type = bdaddr_type;
4095 ev.supported_flags = cpu_to_le32(supported_flags);
4096 ev.current_flags = cpu_to_le32(current_flags);
4098 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4101 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4102 u16 len)
4104 struct mgmt_cp_set_device_flags *cp = data;
4105 struct bdaddr_list_with_flags *br_params;
4106 struct hci_conn_params *params;
4107 u8 status = MGMT_STATUS_INVALID_PARAMS;
4108 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4109 u32 current_flags = __le32_to_cpu(cp->current_flags);
4111 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4112 &cp->addr.bdaddr, cp->addr.type,
4113 __le32_to_cpu(current_flags));
4115 if ((supported_flags | current_flags) != supported_flags) {
4116 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4117 current_flags, supported_flags);
4118 goto done;
4121 hci_dev_lock(hdev);
4123 if (cp->addr.type == BDADDR_BREDR) {
4124 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4125 &cp->addr.bdaddr,
4126 cp->addr.type);
4128 if (br_params) {
4129 br_params->current_flags = current_flags;
4130 status = MGMT_STATUS_SUCCESS;
4131 } else {
4132 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4133 &cp->addr.bdaddr, cp->addr.type);
4135 } else {
4136 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4137 le_addr_type(cp->addr.type));
4138 if (params) {
4139 params->current_flags = current_flags;
4140 status = MGMT_STATUS_SUCCESS;
4141 } else {
4142 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4143 &cp->addr.bdaddr,
4144 le_addr_type(cp->addr.type));
4148 done:
4149 hci_dev_unlock(hdev);
4151 if (status == MGMT_STATUS_SUCCESS)
4152 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4153 supported_flags, current_flags);
4155 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4156 &cp->addr, sizeof(cp->addr));
4159 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4160 u16 handle)
4162 struct mgmt_ev_adv_monitor_added ev;
4164 ev.monitor_handle = cpu_to_le16(handle);
4166 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4169 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4170 u16 handle)
4172 struct mgmt_ev_adv_monitor_added ev;
4174 ev.monitor_handle = cpu_to_le16(handle);
4176 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4179 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4180 void *data, u16 len)
4182 struct adv_monitor *monitor = NULL;
4183 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4184 int handle, err;
4185 size_t rp_size = 0;
4186 __u32 supported = 0;
4187 __u16 num_handles = 0;
4188 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4190 BT_DBG("request for %s", hdev->name);
4192 hci_dev_lock(hdev);
4194 if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4195 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4197 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4198 handles[num_handles++] = monitor->handle;
4201 hci_dev_unlock(hdev);
4203 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4204 rp = kmalloc(rp_size, GFP_KERNEL);
4205 if (!rp)
4206 return -ENOMEM;
4208 /* Once controller-based monitoring is in place, the enabled_features
4209 * should reflect the use.
4211 rp->supported_features = cpu_to_le32(supported);
4212 rp->enabled_features = 0;
4213 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4214 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4215 rp->num_handles = cpu_to_le16(num_handles);
4216 if (num_handles)
4217 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4219 err = mgmt_cmd_complete(sk, hdev->id,
4220 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4221 MGMT_STATUS_SUCCESS, rp, rp_size);
4223 kfree(rp);
4225 return err;
4228 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4229 void *data, u16 len)
4231 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4232 struct mgmt_rp_add_adv_patterns_monitor rp;
4233 struct adv_monitor *m = NULL;
4234 struct adv_pattern *p = NULL;
4235 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4236 __u8 cp_ofst = 0, cp_len = 0;
4237 int err, i;
4239 BT_DBG("request for %s", hdev->name);
4241 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4242 err = mgmt_cmd_status(sk, hdev->id,
4243 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4244 MGMT_STATUS_INVALID_PARAMS);
4245 goto failed;
4248 m = kmalloc(sizeof(*m), GFP_KERNEL);
4249 if (!m) {
4250 err = -ENOMEM;
4251 goto failed;
4254 INIT_LIST_HEAD(&m->patterns);
4255 m->active = false;
4257 for (i = 0; i < cp->pattern_count; i++) {
4258 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4259 err = mgmt_cmd_status(sk, hdev->id,
4260 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4261 MGMT_STATUS_INVALID_PARAMS);
4262 goto failed;
4265 cp_ofst = cp->patterns[i].offset;
4266 cp_len = cp->patterns[i].length;
4267 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4268 cp_len > HCI_MAX_AD_LENGTH ||
4269 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4270 err = mgmt_cmd_status(sk, hdev->id,
4271 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4272 MGMT_STATUS_INVALID_PARAMS);
4273 goto failed;
4276 p = kmalloc(sizeof(*p), GFP_KERNEL);
4277 if (!p) {
4278 err = -ENOMEM;
4279 goto failed;
4282 p->ad_type = cp->patterns[i].ad_type;
4283 p->offset = cp->patterns[i].offset;
4284 p->length = cp->patterns[i].length;
4285 memcpy(p->value, cp->patterns[i].value, p->length);
4287 INIT_LIST_HEAD(&p->list);
4288 list_add(&p->list, &m->patterns);
4291 if (mp_cnt != cp->pattern_count) {
4292 err = mgmt_cmd_status(sk, hdev->id,
4293 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4294 MGMT_STATUS_INVALID_PARAMS);
4295 goto failed;
4298 hci_dev_lock(hdev);
4300 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4302 err = hci_add_adv_monitor(hdev, m);
4303 if (err) {
4304 if (err == -ENOSPC) {
4305 mgmt_cmd_status(sk, hdev->id,
4306 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4307 MGMT_STATUS_NO_RESOURCES);
4309 goto unlock;
4312 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4313 mgmt_adv_monitor_added(sk, hdev, m->handle);
4315 hci_dev_unlock(hdev);
4317 rp.monitor_handle = cpu_to_le16(m->handle);
4319 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4320 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4322 unlock:
4323 hci_dev_unlock(hdev);
4325 failed:
4326 hci_free_adv_monitor(m);
4327 return err;
4330 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4331 void *data, u16 len)
4333 struct mgmt_cp_remove_adv_monitor *cp = data;
4334 struct mgmt_rp_remove_adv_monitor rp;
4335 unsigned int prev_adv_monitors_cnt;
4336 u16 handle;
4337 int err;
4339 BT_DBG("request for %s", hdev->name);
4341 hci_dev_lock(hdev);
4343 handle = __le16_to_cpu(cp->monitor_handle);
4344 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4346 err = hci_remove_adv_monitor(hdev, handle);
4347 if (err == -ENOENT) {
4348 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4349 MGMT_STATUS_INVALID_INDEX);
4350 goto unlock;
4353 if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4354 mgmt_adv_monitor_removed(sk, hdev, handle);
4356 hci_dev_unlock(hdev);
4358 rp.monitor_handle = cp->monitor_handle;
4360 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4361 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4363 unlock:
4364 hci_dev_unlock(hdev);
4365 return err;
4368 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4369 u16 opcode, struct sk_buff *skb)
4371 struct mgmt_rp_read_local_oob_data mgmt_rp;
4372 size_t rp_size = sizeof(mgmt_rp);
4373 struct mgmt_pending_cmd *cmd;
4375 bt_dev_dbg(hdev, "status %u", status);
4377 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4378 if (!cmd)
4379 return;
4381 if (status || !skb) {
4382 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4383 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4384 goto remove;
4387 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4389 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4390 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4392 if (skb->len < sizeof(*rp)) {
4393 mgmt_cmd_status(cmd->sk, hdev->id,
4394 MGMT_OP_READ_LOCAL_OOB_DATA,
4395 MGMT_STATUS_FAILED);
4396 goto remove;
4399 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4400 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
4402 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4403 } else {
4404 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4406 if (skb->len < sizeof(*rp)) {
4407 mgmt_cmd_status(cmd->sk, hdev->id,
4408 MGMT_OP_READ_LOCAL_OOB_DATA,
4409 MGMT_STATUS_FAILED);
4410 goto remove;
4413 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4414 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4416 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4417 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4420 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4421 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4423 remove:
4424 mgmt_pending_remove(cmd);
4427 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4428 void *data, u16 data_len)
4430 struct mgmt_pending_cmd *cmd;
4431 struct hci_request req;
4432 int err;
4434 bt_dev_dbg(hdev, "sock %p", sk);
4436 hci_dev_lock(hdev);
4438 if (!hdev_is_powered(hdev)) {
4439 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4440 MGMT_STATUS_NOT_POWERED);
4441 goto unlock;
4444 if (!lmp_ssp_capable(hdev)) {
4445 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4446 MGMT_STATUS_NOT_SUPPORTED);
4447 goto unlock;
4450 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4451 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4452 MGMT_STATUS_BUSY);
4453 goto unlock;
4456 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4457 if (!cmd) {
4458 err = -ENOMEM;
4459 goto unlock;
4462 hci_req_init(&req, hdev);
4464 if (bredr_sc_enabled(hdev))
4465 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4466 else
4467 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4469 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4470 if (err < 0)
4471 mgmt_pending_remove(cmd);
4473 unlock:
4474 hci_dev_unlock(hdev);
4475 return err;
4478 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4479 void *data, u16 len)
4481 struct mgmt_addr_info *addr = data;
4482 int err;
4484 bt_dev_dbg(hdev, "sock %p", sk);
4486 if (!bdaddr_type_is_valid(addr->type))
4487 return mgmt_cmd_complete(sk, hdev->id,
4488 MGMT_OP_ADD_REMOTE_OOB_DATA,
4489 MGMT_STATUS_INVALID_PARAMS,
4490 addr, sizeof(*addr));
4492 hci_dev_lock(hdev);
4494 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4495 struct mgmt_cp_add_remote_oob_data *cp = data;
4496 u8 status;
4498 if (cp->addr.type != BDADDR_BREDR) {
4499 err = mgmt_cmd_complete(sk, hdev->id,
4500 MGMT_OP_ADD_REMOTE_OOB_DATA,
4501 MGMT_STATUS_INVALID_PARAMS,
4502 &cp->addr, sizeof(cp->addr));
4503 goto unlock;
4506 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4507 cp->addr.type, cp->hash,
4508 cp->rand, NULL, NULL);
4509 if (err < 0)
4510 status = MGMT_STATUS_FAILED;
4511 else
4512 status = MGMT_STATUS_SUCCESS;
4514 err = mgmt_cmd_complete(sk, hdev->id,
4515 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4516 &cp->addr, sizeof(cp->addr));
4517 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4518 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4519 u8 *rand192, *hash192, *rand256, *hash256;
4520 u8 status;
4522 if (bdaddr_type_is_le(cp->addr.type)) {
4523 /* Enforce zero-valued 192-bit parameters as
4524 * long as legacy SMP OOB isn't implemented.
4526 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4527 memcmp(cp->hash192, ZERO_KEY, 16)) {
4528 err = mgmt_cmd_complete(sk, hdev->id,
4529 MGMT_OP_ADD_REMOTE_OOB_DATA,
4530 MGMT_STATUS_INVALID_PARAMS,
4531 addr, sizeof(*addr));
4532 goto unlock;
4535 rand192 = NULL;
4536 hash192 = NULL;
4537 } else {
4538 /* In case one of the P-192 values is set to zero,
4539 * then just disable OOB data for P-192.
4541 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4542 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4543 rand192 = NULL;
4544 hash192 = NULL;
4545 } else {
4546 rand192 = cp->rand192;
4547 hash192 = cp->hash192;
4551 /* In case one of the P-256 values is set to zero, then just
4552 * disable OOB data for P-256.
4554 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4555 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4556 rand256 = NULL;
4557 hash256 = NULL;
4558 } else {
4559 rand256 = cp->rand256;
4560 hash256 = cp->hash256;
4563 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4564 cp->addr.type, hash192, rand192,
4565 hash256, rand256);
4566 if (err < 0)
4567 status = MGMT_STATUS_FAILED;
4568 else
4569 status = MGMT_STATUS_SUCCESS;
4571 err = mgmt_cmd_complete(sk, hdev->id,
4572 MGMT_OP_ADD_REMOTE_OOB_DATA,
4573 status, &cp->addr, sizeof(cp->addr));
4574 } else {
4575 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4576 len);
4577 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4578 MGMT_STATUS_INVALID_PARAMS);
4581 unlock:
4582 hci_dev_unlock(hdev);
4583 return err;
4586 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4587 void *data, u16 len)
4589 struct mgmt_cp_remove_remote_oob_data *cp = data;
4590 u8 status;
4591 int err;
4593 bt_dev_dbg(hdev, "sock %p", sk);
4595 if (cp->addr.type != BDADDR_BREDR)
4596 return mgmt_cmd_complete(sk, hdev->id,
4597 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4598 MGMT_STATUS_INVALID_PARAMS,
4599 &cp->addr, sizeof(cp->addr));
4601 hci_dev_lock(hdev);
4603 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4604 hci_remote_oob_data_clear(hdev);
4605 status = MGMT_STATUS_SUCCESS;
4606 goto done;
4609 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4610 if (err < 0)
4611 status = MGMT_STATUS_INVALID_PARAMS;
4612 else
4613 status = MGMT_STATUS_SUCCESS;
4615 done:
4616 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4617 status, &cp->addr, sizeof(cp->addr));
4619 hci_dev_unlock(hdev);
4620 return err;
4623 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4625 struct mgmt_pending_cmd *cmd;
4627 bt_dev_dbg(hdev, "status %d", status);
4629 hci_dev_lock(hdev);
4631 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4632 if (!cmd)
4633 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4635 if (!cmd)
4636 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4638 if (cmd) {
4639 cmd->cmd_complete(cmd, mgmt_status(status));
4640 mgmt_pending_remove(cmd);
4643 hci_dev_unlock(hdev);
4645 /* Handle suspend notifier */
4646 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4647 hdev->suspend_tasks)) {
4648 bt_dev_dbg(hdev, "Unpaused discovery");
4649 wake_up(&hdev->suspend_wait_q);
4653 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4654 uint8_t *mgmt_status)
4656 switch (type) {
4657 case DISCOV_TYPE_LE:
4658 *mgmt_status = mgmt_le_support(hdev);
4659 if (*mgmt_status)
4660 return false;
4661 break;
4662 case DISCOV_TYPE_INTERLEAVED:
4663 *mgmt_status = mgmt_le_support(hdev);
4664 if (*mgmt_status)
4665 return false;
4666 fallthrough;
4667 case DISCOV_TYPE_BREDR:
4668 *mgmt_status = mgmt_bredr_support(hdev);
4669 if (*mgmt_status)
4670 return false;
4671 break;
4672 default:
4673 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4674 return false;
4677 return true;
4680 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4681 u16 op, void *data, u16 len)
4683 struct mgmt_cp_start_discovery *cp = data;
4684 struct mgmt_pending_cmd *cmd;
4685 u8 status;
4686 int err;
4688 bt_dev_dbg(hdev, "sock %p", sk);
4690 hci_dev_lock(hdev);
4692 if (!hdev_is_powered(hdev)) {
4693 err = mgmt_cmd_complete(sk, hdev->id, op,
4694 MGMT_STATUS_NOT_POWERED,
4695 &cp->type, sizeof(cp->type));
4696 goto failed;
4699 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4700 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4701 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4702 &cp->type, sizeof(cp->type));
4703 goto failed;
4706 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4707 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4708 &cp->type, sizeof(cp->type));
4709 goto failed;
4712 /* Can't start discovery when it is paused */
4713 if (hdev->discovery_paused) {
4714 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4715 &cp->type, sizeof(cp->type));
4716 goto failed;
4719 /* Clear the discovery filter first to free any previously
4720 * allocated memory for the UUID list.
4722 hci_discovery_filter_clear(hdev);
4724 hdev->discovery.type = cp->type;
4725 hdev->discovery.report_invalid_rssi = false;
4726 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4727 hdev->discovery.limited = true;
4728 else
4729 hdev->discovery.limited = false;
4731 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4732 if (!cmd) {
4733 err = -ENOMEM;
4734 goto failed;
4737 cmd->cmd_complete = generic_cmd_complete;
4739 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4740 queue_work(hdev->req_workqueue, &hdev->discov_update);
4741 err = 0;
4743 failed:
4744 hci_dev_unlock(hdev);
4745 return err;
4748 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4749 void *data, u16 len)
4751 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
4752 data, len);
4755 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4756 void *data, u16 len)
4758 return start_discovery_internal(sk, hdev,
4759 MGMT_OP_START_LIMITED_DISCOVERY,
4760 data, len);
4763 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4764 u8 status)
4766 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4767 cmd->param, 1);
4770 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4771 void *data, u16 len)
4773 struct mgmt_cp_start_service_discovery *cp = data;
4774 struct mgmt_pending_cmd *cmd;
4775 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4776 u16 uuid_count, expected_len;
4777 u8 status;
4778 int err;
4780 bt_dev_dbg(hdev, "sock %p", sk);
4782 hci_dev_lock(hdev);
4784 if (!hdev_is_powered(hdev)) {
4785 err = mgmt_cmd_complete(sk, hdev->id,
4786 MGMT_OP_START_SERVICE_DISCOVERY,
4787 MGMT_STATUS_NOT_POWERED,
4788 &cp->type, sizeof(cp->type));
4789 goto failed;
4792 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4793 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4794 err = mgmt_cmd_complete(sk, hdev->id,
4795 MGMT_OP_START_SERVICE_DISCOVERY,
4796 MGMT_STATUS_BUSY, &cp->type,
4797 sizeof(cp->type));
4798 goto failed;
4801 uuid_count = __le16_to_cpu(cp->uuid_count);
4802 if (uuid_count > max_uuid_count) {
4803 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
4804 uuid_count);
4805 err = mgmt_cmd_complete(sk, hdev->id,
4806 MGMT_OP_START_SERVICE_DISCOVERY,
4807 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4808 sizeof(cp->type));
4809 goto failed;
4812 expected_len = sizeof(*cp) + uuid_count * 16;
4813 if (expected_len != len) {
4814 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
4815 expected_len, len);
4816 err = mgmt_cmd_complete(sk, hdev->id,
4817 MGMT_OP_START_SERVICE_DISCOVERY,
4818 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4819 sizeof(cp->type));
4820 goto failed;
4823 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4824 err = mgmt_cmd_complete(sk, hdev->id,
4825 MGMT_OP_START_SERVICE_DISCOVERY,
4826 status, &cp->type, sizeof(cp->type));
4827 goto failed;
4830 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4831 hdev, data, len);
4832 if (!cmd) {
4833 err = -ENOMEM;
4834 goto failed;
4837 cmd->cmd_complete = service_discovery_cmd_complete;
4839 /* Clear the discovery filter first to free any previously
4840 * allocated memory for the UUID list.
4842 hci_discovery_filter_clear(hdev);
4844 hdev->discovery.result_filtering = true;
4845 hdev->discovery.type = cp->type;
4846 hdev->discovery.rssi = cp->rssi;
4847 hdev->discovery.uuid_count = uuid_count;
4849 if (uuid_count > 0) {
4850 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4851 GFP_KERNEL);
4852 if (!hdev->discovery.uuids) {
4853 err = mgmt_cmd_complete(sk, hdev->id,
4854 MGMT_OP_START_SERVICE_DISCOVERY,
4855 MGMT_STATUS_FAILED,
4856 &cp->type, sizeof(cp->type));
4857 mgmt_pending_remove(cmd);
4858 goto failed;
4862 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4863 queue_work(hdev->req_workqueue, &hdev->discov_update);
4864 err = 0;
4866 failed:
4867 hci_dev_unlock(hdev);
4868 return err;
4871 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4873 struct mgmt_pending_cmd *cmd;
4875 bt_dev_dbg(hdev, "status %d", status);
4877 hci_dev_lock(hdev);
4879 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4880 if (cmd) {
4881 cmd->cmd_complete(cmd, mgmt_status(status));
4882 mgmt_pending_remove(cmd);
4885 hci_dev_unlock(hdev);
4887 /* Handle suspend notifier */
4888 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4889 bt_dev_dbg(hdev, "Paused discovery");
4890 wake_up(&hdev->suspend_wait_q);
4894 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4895 u16 len)
4897 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4898 struct mgmt_pending_cmd *cmd;
4899 int err;
4901 bt_dev_dbg(hdev, "sock %p", sk);
4903 hci_dev_lock(hdev);
4905 if (!hci_discovery_active(hdev)) {
4906 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4907 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4908 sizeof(mgmt_cp->type));
4909 goto unlock;
4912 if (hdev->discovery.type != mgmt_cp->type) {
4913 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4914 MGMT_STATUS_INVALID_PARAMS,
4915 &mgmt_cp->type, sizeof(mgmt_cp->type));
4916 goto unlock;
4919 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4920 if (!cmd) {
4921 err = -ENOMEM;
4922 goto unlock;
4925 cmd->cmd_complete = generic_cmd_complete;
4927 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4928 queue_work(hdev->req_workqueue, &hdev->discov_update);
4929 err = 0;
4931 unlock:
4932 hci_dev_unlock(hdev);
4933 return err;
4936 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4937 u16 len)
4939 struct mgmt_cp_confirm_name *cp = data;
4940 struct inquiry_entry *e;
4941 int err;
4943 bt_dev_dbg(hdev, "sock %p", sk);
4945 hci_dev_lock(hdev);
4947 if (!hci_discovery_active(hdev)) {
4948 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4949 MGMT_STATUS_FAILED, &cp->addr,
4950 sizeof(cp->addr));
4951 goto failed;
4954 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4955 if (!e) {
4956 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4957 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4958 sizeof(cp->addr));
4959 goto failed;
4962 if (cp->name_known) {
4963 e->name_state = NAME_KNOWN;
4964 list_del(&e->list);
4965 } else {
4966 e->name_state = NAME_NEEDED;
4967 hci_inquiry_cache_update_resolve(hdev, e);
4970 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4971 &cp->addr, sizeof(cp->addr));
4973 failed:
4974 hci_dev_unlock(hdev);
4975 return err;
4978 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4979 u16 len)
4981 struct mgmt_cp_block_device *cp = data;
4982 u8 status;
4983 int err;
4985 bt_dev_dbg(hdev, "sock %p", sk);
4987 if (!bdaddr_type_is_valid(cp->addr.type))
4988 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4989 MGMT_STATUS_INVALID_PARAMS,
4990 &cp->addr, sizeof(cp->addr));
4992 hci_dev_lock(hdev);
4994 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4995 cp->addr.type);
4996 if (err < 0) {
4997 status = MGMT_STATUS_FAILED;
4998 goto done;
5001 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5002 sk);
5003 status = MGMT_STATUS_SUCCESS;
5005 done:
5006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5007 &cp->addr, sizeof(cp->addr));
5009 hci_dev_unlock(hdev);
5011 return err;
5014 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5015 u16 len)
5017 struct mgmt_cp_unblock_device *cp = data;
5018 u8 status;
5019 int err;
5021 bt_dev_dbg(hdev, "sock %p", sk);
5023 if (!bdaddr_type_is_valid(cp->addr.type))
5024 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5025 MGMT_STATUS_INVALID_PARAMS,
5026 &cp->addr, sizeof(cp->addr));
5028 hci_dev_lock(hdev);
5030 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5031 cp->addr.type);
5032 if (err < 0) {
5033 status = MGMT_STATUS_INVALID_PARAMS;
5034 goto done;
5037 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5038 sk);
5039 status = MGMT_STATUS_SUCCESS;
5041 done:
5042 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5043 &cp->addr, sizeof(cp->addr));
5045 hci_dev_unlock(hdev);
5047 return err;
5050 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5051 u16 len)
5053 struct mgmt_cp_set_device_id *cp = data;
5054 struct hci_request req;
5055 int err;
5056 __u16 source;
5058 bt_dev_dbg(hdev, "sock %p", sk);
5060 source = __le16_to_cpu(cp->source);
5062 if (source > 0x0002)
5063 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5064 MGMT_STATUS_INVALID_PARAMS);
5066 hci_dev_lock(hdev);
5068 hdev->devid_source = source;
5069 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5070 hdev->devid_product = __le16_to_cpu(cp->product);
5071 hdev->devid_version = __le16_to_cpu(cp->version);
5073 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5074 NULL, 0);
5076 hci_req_init(&req, hdev);
5077 __hci_req_update_eir(&req);
5078 hci_req_run(&req, NULL);
5080 hci_dev_unlock(hdev);
5082 return err;
5085 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5086 u16 opcode)
5088 bt_dev_dbg(hdev, "status %d", status);
5091 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5092 u16 opcode)
5094 struct cmd_lookup match = { NULL, hdev };
5095 struct hci_request req;
5096 u8 instance;
5097 struct adv_info *adv_instance;
5098 int err;
5100 hci_dev_lock(hdev);
5102 if (status) {
5103 u8 mgmt_err = mgmt_status(status);
5105 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5106 cmd_status_rsp, &mgmt_err);
5107 goto unlock;
5110 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5111 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5112 else
5113 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5115 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5116 &match);
5118 new_settings(hdev, match.sk);
5120 if (match.sk)
5121 sock_put(match.sk);
5123 /* Handle suspend notifier */
5124 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5125 hdev->suspend_tasks)) {
5126 bt_dev_dbg(hdev, "Paused advertising");
5127 wake_up(&hdev->suspend_wait_q);
5128 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5129 hdev->suspend_tasks)) {
5130 bt_dev_dbg(hdev, "Unpaused advertising");
5131 wake_up(&hdev->suspend_wait_q);
5134 /* If "Set Advertising" was just disabled and instance advertising was
5135 * set up earlier, then re-enable multi-instance advertising.
5137 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5138 list_empty(&hdev->adv_instances))
5139 goto unlock;
5141 instance = hdev->cur_adv_instance;
5142 if (!instance) {
5143 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5144 struct adv_info, list);
5145 if (!adv_instance)
5146 goto unlock;
5148 instance = adv_instance->instance;
5151 hci_req_init(&req, hdev);
5153 err = __hci_req_schedule_adv_instance(&req, instance, true);
5155 if (!err)
5156 err = hci_req_run(&req, enable_advertising_instance);
5158 if (err)
5159 bt_dev_err(hdev, "failed to re-configure advertising");
5161 unlock:
5162 hci_dev_unlock(hdev);
5165 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5166 u16 len)
5168 struct mgmt_mode *cp = data;
5169 struct mgmt_pending_cmd *cmd;
5170 struct hci_request req;
5171 u8 val, status;
5172 int err;
5174 bt_dev_dbg(hdev, "sock %p", sk);
5176 status = mgmt_le_support(hdev);
5177 if (status)
5178 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5179 status);
5181 /* Enabling the experimental LL Privay support disables support for
5182 * advertising.
5184 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5185 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5186 MGMT_STATUS_NOT_SUPPORTED);
5188 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5189 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5190 MGMT_STATUS_INVALID_PARAMS);
5192 if (hdev->advertising_paused)
5193 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5194 MGMT_STATUS_BUSY);
5196 hci_dev_lock(hdev);
5198 val = !!cp->val;
5200 /* The following conditions are ones which mean that we should
5201 * not do any HCI communication but directly send a mgmt
5202 * response to user space (after toggling the flag if
5203 * necessary).
5205 if (!hdev_is_powered(hdev) ||
5206 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5207 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5208 hci_conn_num(hdev, LE_LINK) > 0 ||
5209 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5210 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5211 bool changed;
5213 if (cp->val) {
5214 hdev->cur_adv_instance = 0x00;
5215 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5216 if (cp->val == 0x02)
5217 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5218 else
5219 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5220 } else {
5221 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5222 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5225 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5226 if (err < 0)
5227 goto unlock;
5229 if (changed)
5230 err = new_settings(hdev, sk);
5232 goto unlock;
5235 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5236 pending_find(MGMT_OP_SET_LE, hdev)) {
5237 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5238 MGMT_STATUS_BUSY);
5239 goto unlock;
5242 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5243 if (!cmd) {
5244 err = -ENOMEM;
5245 goto unlock;
5248 hci_req_init(&req, hdev);
5250 if (cp->val == 0x02)
5251 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5252 else
5253 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5255 cancel_adv_timeout(hdev);
5257 if (val) {
5258 /* Switch to instance "0" for the Set Advertising setting.
5259 * We cannot use update_[adv|scan_rsp]_data() here as the
5260 * HCI_ADVERTISING flag is not yet set.
5262 hdev->cur_adv_instance = 0x00;
5264 if (ext_adv_capable(hdev)) {
5265 __hci_req_start_ext_adv(&req, 0x00);
5266 } else {
5267 __hci_req_update_adv_data(&req, 0x00);
5268 __hci_req_update_scan_rsp_data(&req, 0x00);
5269 __hci_req_enable_advertising(&req);
5271 } else {
5272 __hci_req_disable_advertising(&req);
5275 err = hci_req_run(&req, set_advertising_complete);
5276 if (err < 0)
5277 mgmt_pending_remove(cmd);
5279 unlock:
5280 hci_dev_unlock(hdev);
5281 return err;
5284 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5285 void *data, u16 len)
5287 struct mgmt_cp_set_static_address *cp = data;
5288 int err;
5290 bt_dev_dbg(hdev, "sock %p", sk);
5292 if (!lmp_le_capable(hdev))
5293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5294 MGMT_STATUS_NOT_SUPPORTED);
5296 if (hdev_is_powered(hdev))
5297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5298 MGMT_STATUS_REJECTED);
5300 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5301 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5302 return mgmt_cmd_status(sk, hdev->id,
5303 MGMT_OP_SET_STATIC_ADDRESS,
5304 MGMT_STATUS_INVALID_PARAMS);
5306 /* Two most significant bits shall be set */
5307 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5308 return mgmt_cmd_status(sk, hdev->id,
5309 MGMT_OP_SET_STATIC_ADDRESS,
5310 MGMT_STATUS_INVALID_PARAMS);
5313 hci_dev_lock(hdev);
5315 bacpy(&hdev->static_addr, &cp->bdaddr);
5317 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5318 if (err < 0)
5319 goto unlock;
5321 err = new_settings(hdev, sk);
5323 unlock:
5324 hci_dev_unlock(hdev);
5325 return err;
5328 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5329 void *data, u16 len)
5331 struct mgmt_cp_set_scan_params *cp = data;
5332 __u16 interval, window;
5333 int err;
5335 bt_dev_dbg(hdev, "sock %p", sk);
5337 if (!lmp_le_capable(hdev))
5338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5339 MGMT_STATUS_NOT_SUPPORTED);
5341 interval = __le16_to_cpu(cp->interval);
5343 if (interval < 0x0004 || interval > 0x4000)
5344 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5345 MGMT_STATUS_INVALID_PARAMS);
5347 window = __le16_to_cpu(cp->window);
5349 if (window < 0x0004 || window > 0x4000)
5350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5351 MGMT_STATUS_INVALID_PARAMS);
5353 if (window > interval)
5354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5355 MGMT_STATUS_INVALID_PARAMS);
5357 hci_dev_lock(hdev);
5359 hdev->le_scan_interval = interval;
5360 hdev->le_scan_window = window;
5362 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5363 NULL, 0);
5365 /* If background scan is running, restart it so new parameters are
5366 * loaded.
5368 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5369 hdev->discovery.state == DISCOVERY_STOPPED) {
5370 struct hci_request req;
5372 hci_req_init(&req, hdev);
5374 hci_req_add_le_scan_disable(&req, false);
5375 hci_req_add_le_passive_scan(&req);
5377 hci_req_run(&req, NULL);
5380 hci_dev_unlock(hdev);
5382 return err;
5385 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5386 u16 opcode)
5388 struct mgmt_pending_cmd *cmd;
5390 bt_dev_dbg(hdev, "status 0x%02x", status);
5392 hci_dev_lock(hdev);
5394 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5395 if (!cmd)
5396 goto unlock;
5398 if (status) {
5399 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5400 mgmt_status(status));
5401 } else {
5402 struct mgmt_mode *cp = cmd->param;
5404 if (cp->val)
5405 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5406 else
5407 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5409 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5410 new_settings(hdev, cmd->sk);
5413 mgmt_pending_remove(cmd);
5415 unlock:
5416 hci_dev_unlock(hdev);
5419 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5420 void *data, u16 len)
5422 struct mgmt_mode *cp = data;
5423 struct mgmt_pending_cmd *cmd;
5424 struct hci_request req;
5425 int err;
5427 bt_dev_dbg(hdev, "sock %p", sk);
5429 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5430 hdev->hci_ver < BLUETOOTH_VER_1_2)
5431 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5432 MGMT_STATUS_NOT_SUPPORTED);
5434 if (cp->val != 0x00 && cp->val != 0x01)
5435 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5436 MGMT_STATUS_INVALID_PARAMS);
5438 hci_dev_lock(hdev);
5440 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5442 MGMT_STATUS_BUSY);
5443 goto unlock;
5446 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5447 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5448 hdev);
5449 goto unlock;
5452 if (!hdev_is_powered(hdev)) {
5453 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5454 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5455 hdev);
5456 new_settings(hdev, sk);
5457 goto unlock;
5460 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5461 data, len);
5462 if (!cmd) {
5463 err = -ENOMEM;
5464 goto unlock;
5467 hci_req_init(&req, hdev);
5469 __hci_req_write_fast_connectable(&req, cp->val);
5471 err = hci_req_run(&req, fast_connectable_complete);
5472 if (err < 0) {
5473 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5474 MGMT_STATUS_FAILED);
5475 mgmt_pending_remove(cmd);
5478 unlock:
5479 hci_dev_unlock(hdev);
5481 return err;
5484 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5486 struct mgmt_pending_cmd *cmd;
5488 bt_dev_dbg(hdev, "status 0x%02x", status);
5490 hci_dev_lock(hdev);
5492 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5493 if (!cmd)
5494 goto unlock;
5496 if (status) {
5497 u8 mgmt_err = mgmt_status(status);
5499 /* We need to restore the flag if related HCI commands
5500 * failed.
5502 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5504 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5505 } else {
5506 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5507 new_settings(hdev, cmd->sk);
5510 mgmt_pending_remove(cmd);
5512 unlock:
5513 hci_dev_unlock(hdev);
5516 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5518 struct mgmt_mode *cp = data;
5519 struct mgmt_pending_cmd *cmd;
5520 struct hci_request req;
5521 int err;
5523 bt_dev_dbg(hdev, "sock %p", sk);
5525 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5527 MGMT_STATUS_NOT_SUPPORTED);
5529 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5530 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5531 MGMT_STATUS_REJECTED);
5533 if (cp->val != 0x00 && cp->val != 0x01)
5534 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5535 MGMT_STATUS_INVALID_PARAMS);
5537 hci_dev_lock(hdev);
5539 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5540 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5541 goto unlock;
5544 if (!hdev_is_powered(hdev)) {
5545 if (!cp->val) {
5546 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5547 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5548 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5549 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5550 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5553 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5555 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5556 if (err < 0)
5557 goto unlock;
5559 err = new_settings(hdev, sk);
5560 goto unlock;
5563 /* Reject disabling when powered on */
5564 if (!cp->val) {
5565 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5566 MGMT_STATUS_REJECTED);
5567 goto unlock;
5568 } else {
5569 /* When configuring a dual-mode controller to operate
5570 * with LE only and using a static address, then switching
5571 * BR/EDR back on is not allowed.
5573 * Dual-mode controllers shall operate with the public
5574 * address as its identity address for BR/EDR and LE. So
5575 * reject the attempt to create an invalid configuration.
5577 * The same restrictions applies when secure connections
5578 * has been enabled. For BR/EDR this is a controller feature
5579 * while for LE it is a host stack feature. This means that
5580 * switching BR/EDR back on when secure connections has been
5581 * enabled is not a supported transaction.
5583 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5584 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5585 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5587 MGMT_STATUS_REJECTED);
5588 goto unlock;
5592 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5593 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5594 MGMT_STATUS_BUSY);
5595 goto unlock;
5598 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5599 if (!cmd) {
5600 err = -ENOMEM;
5601 goto unlock;
5604 /* We need to flip the bit already here so that
5605 * hci_req_update_adv_data generates the correct flags.
5607 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5609 hci_req_init(&req, hdev);
5611 __hci_req_write_fast_connectable(&req, false);
5612 __hci_req_update_scan(&req);
5614 /* Since only the advertising data flags will change, there
5615 * is no need to update the scan response data.
5617 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5619 err = hci_req_run(&req, set_bredr_complete);
5620 if (err < 0)
5621 mgmt_pending_remove(cmd);
5623 unlock:
5624 hci_dev_unlock(hdev);
5625 return err;
5628 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5630 struct mgmt_pending_cmd *cmd;
5631 struct mgmt_mode *cp;
5633 bt_dev_dbg(hdev, "status %u", status);
5635 hci_dev_lock(hdev);
5637 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5638 if (!cmd)
5639 goto unlock;
5641 if (status) {
5642 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5643 mgmt_status(status));
5644 goto remove;
5647 cp = cmd->param;
5649 switch (cp->val) {
5650 case 0x00:
5651 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5652 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5653 break;
5654 case 0x01:
5655 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5656 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5657 break;
5658 case 0x02:
5659 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5660 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5661 break;
5664 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5665 new_settings(hdev, cmd->sk);
5667 remove:
5668 mgmt_pending_remove(cmd);
5669 unlock:
5670 hci_dev_unlock(hdev);
5673 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5674 void *data, u16 len)
5676 struct mgmt_mode *cp = data;
5677 struct mgmt_pending_cmd *cmd;
5678 struct hci_request req;
5679 u8 val;
5680 int err;
5682 bt_dev_dbg(hdev, "sock %p", sk);
5684 if (!lmp_sc_capable(hdev) &&
5685 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5687 MGMT_STATUS_NOT_SUPPORTED);
5689 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5690 lmp_sc_capable(hdev) &&
5691 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5692 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5693 MGMT_STATUS_REJECTED);
5695 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5696 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5697 MGMT_STATUS_INVALID_PARAMS);
5699 hci_dev_lock(hdev);
5701 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5702 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5703 bool changed;
5705 if (cp->val) {
5706 changed = !hci_dev_test_and_set_flag(hdev,
5707 HCI_SC_ENABLED);
5708 if (cp->val == 0x02)
5709 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5710 else
5711 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5712 } else {
5713 changed = hci_dev_test_and_clear_flag(hdev,
5714 HCI_SC_ENABLED);
5715 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5718 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5719 if (err < 0)
5720 goto failed;
5722 if (changed)
5723 err = new_settings(hdev, sk);
5725 goto failed;
5728 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5729 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5730 MGMT_STATUS_BUSY);
5731 goto failed;
5734 val = !!cp->val;
5736 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5737 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5738 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5739 goto failed;
5742 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5743 if (!cmd) {
5744 err = -ENOMEM;
5745 goto failed;
5748 hci_req_init(&req, hdev);
5749 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5750 err = hci_req_run(&req, sc_enable_complete);
5751 if (err < 0) {
5752 mgmt_pending_remove(cmd);
5753 goto failed;
5756 failed:
5757 hci_dev_unlock(hdev);
5758 return err;
5761 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5762 void *data, u16 len)
5764 struct mgmt_mode *cp = data;
5765 bool changed, use_changed;
5766 int err;
5768 bt_dev_dbg(hdev, "sock %p", sk);
5770 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5771 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5772 MGMT_STATUS_INVALID_PARAMS);
5774 hci_dev_lock(hdev);
5776 if (cp->val)
5777 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5778 else
5779 changed = hci_dev_test_and_clear_flag(hdev,
5780 HCI_KEEP_DEBUG_KEYS);
5782 if (cp->val == 0x02)
5783 use_changed = !hci_dev_test_and_set_flag(hdev,
5784 HCI_USE_DEBUG_KEYS);
5785 else
5786 use_changed = hci_dev_test_and_clear_flag(hdev,
5787 HCI_USE_DEBUG_KEYS);
5789 if (hdev_is_powered(hdev) && use_changed &&
5790 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5791 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5792 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5793 sizeof(mode), &mode);
5796 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5797 if (err < 0)
5798 goto unlock;
5800 if (changed)
5801 err = new_settings(hdev, sk);
5803 unlock:
5804 hci_dev_unlock(hdev);
5805 return err;
5808 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5809 u16 len)
5811 struct mgmt_cp_set_privacy *cp = cp_data;
5812 bool changed;
5813 int err;
5815 bt_dev_dbg(hdev, "sock %p", sk);
5817 if (!lmp_le_capable(hdev))
5818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5819 MGMT_STATUS_NOT_SUPPORTED);
5821 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5823 MGMT_STATUS_INVALID_PARAMS);
5825 if (hdev_is_powered(hdev))
5826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5827 MGMT_STATUS_REJECTED);
5829 hci_dev_lock(hdev);
5831 /* If user space supports this command it is also expected to
5832 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5834 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5836 if (cp->privacy) {
5837 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5838 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5839 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5840 hci_adv_instances_set_rpa_expired(hdev, true);
5841 if (cp->privacy == 0x02)
5842 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5843 else
5844 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5845 } else {
5846 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5847 memset(hdev->irk, 0, sizeof(hdev->irk));
5848 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5849 hci_adv_instances_set_rpa_expired(hdev, false);
5850 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5853 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5854 if (err < 0)
5855 goto unlock;
5857 if (changed)
5858 err = new_settings(hdev, sk);
5860 unlock:
5861 hci_dev_unlock(hdev);
5862 return err;
5865 static bool irk_is_valid(struct mgmt_irk_info *irk)
5867 switch (irk->addr.type) {
5868 case BDADDR_LE_PUBLIC:
5869 return true;
5871 case BDADDR_LE_RANDOM:
5872 /* Two most significant bits shall be set */
5873 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5874 return false;
5875 return true;
5878 return false;
5881 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5882 u16 len)
5884 struct mgmt_cp_load_irks *cp = cp_data;
5885 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5886 sizeof(struct mgmt_irk_info));
5887 u16 irk_count, expected_len;
5888 int i, err;
5890 bt_dev_dbg(hdev, "sock %p", sk);
5892 if (!lmp_le_capable(hdev))
5893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5894 MGMT_STATUS_NOT_SUPPORTED);
5896 irk_count = __le16_to_cpu(cp->irk_count);
5897 if (irk_count > max_irk_count) {
5898 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5899 irk_count);
5900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5901 MGMT_STATUS_INVALID_PARAMS);
5904 expected_len = struct_size(cp, irks, irk_count);
5905 if (expected_len != len) {
5906 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5907 expected_len, len);
5908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5909 MGMT_STATUS_INVALID_PARAMS);
5912 bt_dev_dbg(hdev, "irk_count %u", irk_count);
5914 for (i = 0; i < irk_count; i++) {
5915 struct mgmt_irk_info *key = &cp->irks[i];
5917 if (!irk_is_valid(key))
5918 return mgmt_cmd_status(sk, hdev->id,
5919 MGMT_OP_LOAD_IRKS,
5920 MGMT_STATUS_INVALID_PARAMS);
5923 hci_dev_lock(hdev);
5925 hci_smp_irks_clear(hdev);
5927 for (i = 0; i < irk_count; i++) {
5928 struct mgmt_irk_info *irk = &cp->irks[i];
5930 if (hci_is_blocked_key(hdev,
5931 HCI_BLOCKED_KEY_TYPE_IRK,
5932 irk->val)) {
5933 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5934 &irk->addr.bdaddr);
5935 continue;
5938 hci_add_irk(hdev, &irk->addr.bdaddr,
5939 le_addr_type(irk->addr.type), irk->val,
5940 BDADDR_ANY);
5943 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5945 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5947 hci_dev_unlock(hdev);
5949 return err;
5952 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5954 if (key->master != 0x00 && key->master != 0x01)
5955 return false;
5957 switch (key->addr.type) {
5958 case BDADDR_LE_PUBLIC:
5959 return true;
5961 case BDADDR_LE_RANDOM:
5962 /* Two most significant bits shall be set */
5963 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5964 return false;
5965 return true;
5968 return false;
5971 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5972 void *cp_data, u16 len)
5974 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5975 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5976 sizeof(struct mgmt_ltk_info));
5977 u16 key_count, expected_len;
5978 int i, err;
5980 bt_dev_dbg(hdev, "sock %p", sk);
5982 if (!lmp_le_capable(hdev))
5983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5984 MGMT_STATUS_NOT_SUPPORTED);
5986 key_count = __le16_to_cpu(cp->key_count);
5987 if (key_count > max_key_count) {
5988 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5989 key_count);
5990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5991 MGMT_STATUS_INVALID_PARAMS);
5994 expected_len = struct_size(cp, keys, key_count);
5995 if (expected_len != len) {
5996 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5997 expected_len, len);
5998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5999 MGMT_STATUS_INVALID_PARAMS);
6002 bt_dev_dbg(hdev, "key_count %u", key_count);
6004 for (i = 0; i < key_count; i++) {
6005 struct mgmt_ltk_info *key = &cp->keys[i];
6007 if (!ltk_is_valid(key))
6008 return mgmt_cmd_status(sk, hdev->id,
6009 MGMT_OP_LOAD_LONG_TERM_KEYS,
6010 MGMT_STATUS_INVALID_PARAMS);
6013 hci_dev_lock(hdev);
6015 hci_smp_ltks_clear(hdev);
6017 for (i = 0; i < key_count; i++) {
6018 struct mgmt_ltk_info *key = &cp->keys[i];
6019 u8 type, authenticated;
6021 if (hci_is_blocked_key(hdev,
6022 HCI_BLOCKED_KEY_TYPE_LTK,
6023 key->val)) {
6024 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
6025 &key->addr.bdaddr);
6026 continue;
6029 switch (key->type) {
6030 case MGMT_LTK_UNAUTHENTICATED:
6031 authenticated = 0x00;
6032 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6033 break;
6034 case MGMT_LTK_AUTHENTICATED:
6035 authenticated = 0x01;
6036 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6037 break;
6038 case MGMT_LTK_P256_UNAUTH:
6039 authenticated = 0x00;
6040 type = SMP_LTK_P256;
6041 break;
6042 case MGMT_LTK_P256_AUTH:
6043 authenticated = 0x01;
6044 type = SMP_LTK_P256;
6045 break;
6046 case MGMT_LTK_P256_DEBUG:
6047 authenticated = 0x00;
6048 type = SMP_LTK_P256_DEBUG;
6049 fallthrough;
6050 default:
6051 continue;
6054 hci_add_ltk(hdev, &key->addr.bdaddr,
6055 le_addr_type(key->addr.type), type, authenticated,
6056 key->val, key->enc_size, key->ediv, key->rand);
6059 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6060 NULL, 0);
6062 hci_dev_unlock(hdev);
6064 return err;
/* Complete a pending Get Connection Information command.
 *
 * On success, reply with the RSSI/TX power values cached in the
 * hci_conn; on any failure, reply with the "invalid" sentinel values
 * so userspace can tell the fields carry no data.  Releases the
 * connection hold/reference taken when the command was queued.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param is the original mgmt_cp_get_conn_info, which starts
	 * with the same mgmt_addr_info the reply must echo back.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hci_conn_hold()/hci_conn_get() taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
/* HCI request callback for the RSSI/TX-power refresh issued by
 * get_conn_info().  Recovers the connection handle from the last sent
 * HCI command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		/* Neither command matches: the request state is inconsistent */
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Pending command was keyed on the hci_conn in get_conn_info() */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Get Connection Information mgmt command.
 *
 * Replies immediately from the values cached in the hci_conn when they
 * are recent enough; otherwise issues an HCI request (Read RSSI plus,
 * when needed, Read Transmit Power Level) and defers the reply to
 * conn_info_cmd_complete() via conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh may be outstanding per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;	/* current TX level */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;	/* maximum TX level */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Complete a pending Get Clock Information command.
 *
 * On success, fill in the local controller clock and, when a
 * connection was targeted, the piconet clock and accuracy.  On error
 * the zeroed reply (address echoed back) is sent with the status.
 * Drops the connection reference taken in get_clock_info(), if any.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param starts with the mgmt_addr_info to echo back */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Release the hold/get taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
/* HCI request callback for Read Clock issued by get_clock_info().
 *
 * The pending mgmt command is keyed on the hci_conn (or NULL for the
 * local clock), which is recovered from the parameters of the last
 * sent HCI_OP_READ_CLOCK command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		/* which == 0x01: piconet clock of a specific connection */
		u16 handle = __le16_to_cpu(hci_cp->handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		/* which == 0x00: local clock, no connection involved */
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the Get Clock Information mgmt command (BR/EDR only).
 *
 * Always reads the local clock; when a peer address is given and a
 * connection exists, additionally reads that connection's piconet
 * clock.  The reply is deferred to clock_info_cmd_complete() via
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests only the local clock */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00 from memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6413 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6415 struct hci_conn *conn;
6417 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6418 if (!conn)
6419 return false;
6421 if (conn->dst_type != type)
6422 return false;
6424 if (conn->state != BT_CONNECTED)
6425 return false;
6427 return true;
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Returns the existing entry or creates a new one */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns/pend_le_reports)
	 * the entry is currently on before re-filing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if none is active yet */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6475 static void device_added(struct sock *sk, struct hci_dev *hdev,
6476 bdaddr_t *bdaddr, u8 type, u8 action)
6478 struct mgmt_ev_device_added ev;
6480 bacpy(&ev.addr.bdaddr, bdaddr);
6481 ev.addr.type = type;
6482 ev.action = action;
6484 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handle the Add Device mgmt command.
 *
 * For BR/EDR (action 0x01 only) the address goes on the connection
 * whitelist.  For LE, the action selects the auto-connect policy:
 * 0x00 background scan report, 0x01 direct connect, 0x02 auto-connect
 * always.  Emits Device Added and Device Flags Changed events.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need to be enabled for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6585 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6586 bdaddr_t *bdaddr, u8 type)
6588 struct mgmt_ev_device_removed ev;
6590 bacpy(&ev.addr.bdaddr, bdaddr);
6591 ev.addr.type = type;
6593 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handle the Remove Device mgmt command.
 *
 * A specific address removes that entry from the BR/EDR whitelist or
 * the LE connection parameters.  BDADDR_ANY (with address type 0)
 * flushes the whole whitelist and every non-disabled LE conn_params
 * entry, preserving entries with an explicit connect in progress.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may no longer be needed */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not added via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0x00 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connect attempt in flight,
			 * downgraded to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Load Connection Parameters mgmt command.
 *
 * Replaces any disabled connection parameter entries and loads the
 * supplied preferred LE connection parameters.  Individual invalid
 * entries are skipped (logged), not treated as a command failure.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count that still fits in a 16-bit mgmt payload length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
/* Handle the Set External Configuration mgmt command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on a controller that declares
 * the EXTERNAL_CONFIG quirk.  When the change flips the controller
 * between configured and unconfigured, the mgmt index is re-announced
 * accordingly (and power-on is queued for a newly configured device).
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state now disagrees with the UNCONFIGURED
	 * flag, the controller must move between the configured and
	 * unconfigured index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the Set Public Address mgmt command.
 *
 * Stores the public address for controllers that need one programmed
 * via the set_bdaddr driver callback.  If the address completes the
 * configuration, the controller transitions from the unconfigured to
 * the configured index and is powered on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful when the driver can program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI callback completing Read Local OOB Extended Data.
 *
 * Depending on which HCI command was used (legacy OOB vs extended,
 * selected in read_local_ssp_oob_req()), builds the EIR payload with
 * class of device plus the available P-192/P-256 hash and randomizer
 * pairs, replies to the pending command and broadcasts the new OOB
 * data to sockets that subscribed to OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes CoD element + two 18-byte EIR elements */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 and (unless SC-only) P-192 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* P-192 values are not allowed in SC-only */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7029 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7030 struct mgmt_cp_read_local_oob_ext_data *cp)
7032 struct mgmt_pending_cmd *cmd;
7033 struct hci_request req;
7034 int err;
7036 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7037 cp, sizeof(*cp));
7038 if (!cmd)
7039 return -ENOMEM;
7041 hci_req_init(&req, hdev);
7043 if (bredr_sc_enabled(hdev))
7044 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7045 else
7046 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7048 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7049 if (err < 0) {
7050 mgmt_pending_remove(cmd);
7051 return err;
7054 return 0;
/* Handle the Read Local OOB Extended Data mgmt command.
 *
 * BR/EDR requests with SSP enabled are forwarded to the controller
 * via read_local_ssp_oob_req() and completed asynchronously.  LE
 * requests are answered synchronously with address, role, optional SC
 * confirm/random values and flags.  On success the reply is also
 * broadcast as a Local OOB Data Updated event.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the type and size the reply buffer */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Defer to the controller; reply sent from the
			 * HCI completion callback on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static random vs public address; the 7th byte
		 * encodes the address type (0x01 = random).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7213 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7215 u32 flags = 0;
7217 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7218 flags |= MGMT_ADV_FLAG_DISCOV;
7219 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7220 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7221 flags |= MGMT_ADV_FLAG_APPEARANCE;
7222 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7223 flags |= MGMT_ADV_PARAM_DURATION;
7224 flags |= MGMT_ADV_PARAM_TIMEOUT;
7225 flags |= MGMT_ADV_PARAM_INTERVALS;
7226 flags |= MGMT_ADV_PARAM_TX_POWER;
7228 /* In extended adv TX_POWER returned from Set Adv Param
7229 * will be always valid.
7231 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7232 ext_adv_capable(hdev))
7233 flags |= MGMT_ADV_FLAG_TX_POWER;
7235 if (ext_adv_capable(hdev)) {
7236 flags |= MGMT_ADV_FLAG_SEC_1M;
7237 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7238 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7240 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7241 flags |= MGMT_ADV_FLAG_SEC_2M;
7243 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7244 flags |= MGMT_ADV_FLAG_SEC_CODED;
7247 return flags;
7250 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7251 void *data, u16 data_len)
7253 struct mgmt_rp_read_adv_features *rp;
7254 size_t rp_len;
7255 int err;
7256 struct adv_info *adv_instance;
7257 u32 supported_flags;
7258 u8 *instance;
7260 bt_dev_dbg(hdev, "sock %p", sk);
7262 if (!lmp_le_capable(hdev))
7263 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7264 MGMT_STATUS_REJECTED);
7266 /* Enabling the experimental LL Privay support disables support for
7267 * advertising.
7269 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7271 MGMT_STATUS_NOT_SUPPORTED);
7273 hci_dev_lock(hdev);
7275 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7276 rp = kmalloc(rp_len, GFP_ATOMIC);
7277 if (!rp) {
7278 hci_dev_unlock(hdev);
7279 return -ENOMEM;
7282 supported_flags = get_supported_adv_flags(hdev);
7284 rp->supported_flags = cpu_to_le32(supported_flags);
7285 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7286 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7287 rp->max_instances = hdev->le_num_of_adv_sets;
7288 rp->num_instances = hdev->adv_instance_cnt;
7290 instance = rp->instance;
7291 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7292 *instance = adv_instance->instance;
7293 instance++;
7296 hci_dev_unlock(hdev);
7298 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7299 MGMT_STATUS_SUCCESS, rp, rp_len);
7301 kfree(rp);
7303 return err;
7306 static u8 calculate_name_len(struct hci_dev *hdev)
7308 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7310 return append_local_name(hdev, buf, 0);
7313 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7314 bool is_adv_data)
7316 u8 max_len = HCI_MAX_AD_LENGTH;
7318 if (is_adv_data) {
7319 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7320 MGMT_ADV_FLAG_LIMITED_DISCOV |
7321 MGMT_ADV_FLAG_MANAGED_FLAGS))
7322 max_len -= 3;
7324 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7325 max_len -= 3;
7326 } else {
7327 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7328 max_len -= calculate_name_len(hdev);
7330 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7331 max_len -= 4;
7334 return max_len;
7337 static bool flags_managed(u32 adv_flags)
7339 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7340 MGMT_ADV_FLAG_LIMITED_DISCOV |
7341 MGMT_ADV_FLAG_MANAGED_FLAGS);
7344 static bool tx_power_managed(u32 adv_flags)
7346 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7349 static bool name_managed(u32 adv_flags)
7351 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7354 static bool appearance_managed(u32 adv_flags)
7356 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7359 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7360 u8 len, bool is_adv_data)
7362 int i, cur_len;
7363 u8 max_len;
7365 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7367 if (len > max_len)
7368 return false;
7370 /* Make sure that the data is correctly formatted. */
7371 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7372 cur_len = data[i];
7374 if (data[i + 1] == EIR_FLAGS &&
7375 (!is_adv_data || flags_managed(adv_flags)))
7376 return false;
7378 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7379 return false;
7381 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7382 return false;
7384 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7385 return false;
7387 if (data[i + 1] == EIR_APPEARANCE &&
7388 appearance_managed(adv_flags))
7389 return false;
7391 /* If the current field length would exceed the total data
7392 * length, then it's invalid.
7394 if (i + cur_len >= len)
7395 return false;
7398 return true;
7401 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7403 u32 supported_flags, phy_flags;
7405 /* The current implementation only supports a subset of the specified
7406 * flags. Also need to check mutual exclusiveness of sec flags.
7408 supported_flags = get_supported_adv_flags(hdev);
7409 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7410 if (adv_flags & ~supported_flags ||
7411 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7412 return false;
7414 return true;
7417 static bool adv_busy(struct hci_dev *hdev)
7419 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7420 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7421 pending_find(MGMT_OP_SET_LE, hdev) ||
7422 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7423 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7426 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7427 u16 opcode)
7429 struct mgmt_pending_cmd *cmd;
7430 struct mgmt_cp_add_advertising *cp;
7431 struct mgmt_rp_add_advertising rp;
7432 struct adv_info *adv_instance, *n;
7433 u8 instance;
7435 bt_dev_dbg(hdev, "status %d", status);
7437 hci_dev_lock(hdev);
7439 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7440 if (!cmd)
7441 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
7443 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7444 if (!adv_instance->pending)
7445 continue;
7447 if (!status) {
7448 adv_instance->pending = false;
7449 continue;
7452 instance = adv_instance->instance;
7454 if (hdev->cur_adv_instance == instance)
7455 cancel_adv_timeout(hdev);
7457 hci_remove_adv_instance(hdev, instance);
7458 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7461 if (!cmd)
7462 goto unlock;
7464 cp = cmd->param;
7465 rp.instance = cp->instance;
7467 if (status)
7468 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7469 mgmt_status(status));
7470 else
7471 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7472 mgmt_status(status), &rp, sizeof(rp));
7474 mgmt_pending_remove(cmd);
7476 unlock:
7477 hci_dev_unlock(hdev);
7480 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7481 void *data, u16 data_len)
7483 struct mgmt_cp_add_advertising *cp = data;
7484 struct mgmt_rp_add_advertising rp;
7485 u32 flags;
7486 u8 status;
7487 u16 timeout, duration;
7488 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7489 u8 schedule_instance = 0;
7490 struct adv_info *next_instance;
7491 int err;
7492 struct mgmt_pending_cmd *cmd;
7493 struct hci_request req;
7495 bt_dev_dbg(hdev, "sock %p", sk);
7497 status = mgmt_le_support(hdev);
7498 if (status)
7499 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7500 status);
7502 /* Enabling the experimental LL Privay support disables support for
7503 * advertising.
7505 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7506 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7507 MGMT_STATUS_NOT_SUPPORTED);
7509 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7510 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7511 MGMT_STATUS_INVALID_PARAMS);
7513 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7515 MGMT_STATUS_INVALID_PARAMS);
7517 flags = __le32_to_cpu(cp->flags);
7518 timeout = __le16_to_cpu(cp->timeout);
7519 duration = __le16_to_cpu(cp->duration);
7521 if (!requested_adv_flags_are_valid(hdev, flags))
7522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7523 MGMT_STATUS_INVALID_PARAMS);
7525 hci_dev_lock(hdev);
7527 if (timeout && !hdev_is_powered(hdev)) {
7528 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7529 MGMT_STATUS_REJECTED);
7530 goto unlock;
7533 if (adv_busy(hdev)) {
7534 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7535 MGMT_STATUS_BUSY);
7536 goto unlock;
7539 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7540 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7541 cp->scan_rsp_len, false)) {
7542 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7543 MGMT_STATUS_INVALID_PARAMS);
7544 goto unlock;
7547 err = hci_add_adv_instance(hdev, cp->instance, flags,
7548 cp->adv_data_len, cp->data,
7549 cp->scan_rsp_len,
7550 cp->data + cp->adv_data_len,
7551 timeout, duration,
7552 HCI_ADV_TX_POWER_NO_PREFERENCE,
7553 hdev->le_adv_min_interval,
7554 hdev->le_adv_max_interval);
7555 if (err < 0) {
7556 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7557 MGMT_STATUS_FAILED);
7558 goto unlock;
7561 /* Only trigger an advertising added event if a new instance was
7562 * actually added.
7564 if (hdev->adv_instance_cnt > prev_instance_cnt)
7565 mgmt_advertising_added(sk, hdev, cp->instance);
7567 if (hdev->cur_adv_instance == cp->instance) {
7568 /* If the currently advertised instance is being changed then
7569 * cancel the current advertising and schedule the next
7570 * instance. If there is only one instance then the overridden
7571 * advertising data will be visible right away.
7573 cancel_adv_timeout(hdev);
7575 next_instance = hci_get_next_instance(hdev, cp->instance);
7576 if (next_instance)
7577 schedule_instance = next_instance->instance;
7578 } else if (!hdev->adv_instance_timeout) {
7579 /* Immediately advertise the new instance if no other
7580 * instance is currently being advertised.
7582 schedule_instance = cp->instance;
7585 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7586 * there is no instance to be advertised then we have no HCI
7587 * communication to make. Simply return.
7589 if (!hdev_is_powered(hdev) ||
7590 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7591 !schedule_instance) {
7592 rp.instance = cp->instance;
7593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7594 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7595 goto unlock;
7598 /* We're good to go, update advertising data, parameters, and start
7599 * advertising.
7601 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7602 data_len);
7603 if (!cmd) {
7604 err = -ENOMEM;
7605 goto unlock;
7608 hci_req_init(&req, hdev);
7610 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7612 if (!err)
7613 err = hci_req_run(&req, add_advertising_complete);
7615 if (err < 0) {
7616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7617 MGMT_STATUS_FAILED);
7618 mgmt_pending_remove(cmd);
7621 unlock:
7622 hci_dev_unlock(hdev);
7624 return err;
7627 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
7628 u16 opcode)
7630 struct mgmt_pending_cmd *cmd;
7631 struct mgmt_cp_add_ext_adv_params *cp;
7632 struct mgmt_rp_add_ext_adv_params rp;
7633 struct adv_info *adv_instance;
7634 u32 flags;
7636 BT_DBG("%s", hdev->name);
7638 hci_dev_lock(hdev);
7640 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
7641 if (!cmd)
7642 goto unlock;
7644 cp = cmd->param;
7645 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7646 if (!adv_instance)
7647 goto unlock;
7649 rp.instance = cp->instance;
7650 rp.tx_power = adv_instance->tx_power;
7652 /* While we're at it, inform userspace of the available space for this
7653 * advertisement, given the flags that will be used.
7655 flags = __le32_to_cpu(cp->flags);
7656 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7657 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7659 if (status) {
7660 /* If this advertisement was previously advertising and we
7661 * failed to update it, we signal that it has been removed and
7662 * delete its structure
7664 if (!adv_instance->pending)
7665 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
7667 hci_remove_adv_instance(hdev, cp->instance);
7669 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7670 mgmt_status(status));
7672 } else {
7673 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7674 mgmt_status(status), &rp, sizeof(rp));
7677 unlock:
7678 if (cmd)
7679 mgmt_pending_remove(cmd);
7681 hci_dev_unlock(hdev);
7684 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7685 void *data, u16 data_len)
7687 struct mgmt_cp_add_ext_adv_params *cp = data;
7688 struct mgmt_rp_add_ext_adv_params rp;
7689 struct mgmt_pending_cmd *cmd = NULL;
7690 struct adv_info *adv_instance;
7691 struct hci_request req;
7692 u32 flags, min_interval, max_interval;
7693 u16 timeout, duration;
7694 u8 status;
7695 s8 tx_power;
7696 int err;
7698 BT_DBG("%s", hdev->name);
7700 status = mgmt_le_support(hdev);
7701 if (status)
7702 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7703 status);
7705 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7706 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7707 MGMT_STATUS_INVALID_PARAMS);
7709 /* The purpose of breaking add_advertising into two separate MGMT calls
7710 * for params and data is to allow more parameters to be added to this
7711 * structure in the future. For this reason, we verify that we have the
7712 * bare minimum structure we know of when the interface was defined. Any
7713 * extra parameters we don't know about will be ignored in this request.
7715 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7716 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7717 MGMT_STATUS_INVALID_PARAMS);
7719 flags = __le32_to_cpu(cp->flags);
7721 if (!requested_adv_flags_are_valid(hdev, flags))
7722 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7723 MGMT_STATUS_INVALID_PARAMS);
7725 hci_dev_lock(hdev);
7727 /* In new interface, we require that we are powered to register */
7728 if (!hdev_is_powered(hdev)) {
7729 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7730 MGMT_STATUS_REJECTED);
7731 goto unlock;
7734 if (adv_busy(hdev)) {
7735 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7736 MGMT_STATUS_BUSY);
7737 goto unlock;
7740 /* Parse defined parameters from request, use defaults otherwise */
7741 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7742 __le16_to_cpu(cp->timeout) : 0;
7744 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7745 __le16_to_cpu(cp->duration) :
7746 hdev->def_multi_adv_rotation_duration;
7748 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7749 __le32_to_cpu(cp->min_interval) :
7750 hdev->le_adv_min_interval;
7752 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7753 __le32_to_cpu(cp->max_interval) :
7754 hdev->le_adv_max_interval;
7756 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7757 cp->tx_power :
7758 HCI_ADV_TX_POWER_NO_PREFERENCE;
7760 /* Create advertising instance with no advertising or response data */
7761 err = hci_add_adv_instance(hdev, cp->instance, flags,
7762 0, NULL, 0, NULL, timeout, duration,
7763 tx_power, min_interval, max_interval);
7765 if (err < 0) {
7766 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7767 MGMT_STATUS_FAILED);
7768 goto unlock;
7771 hdev->cur_adv_instance = cp->instance;
7772 /* Submit request for advertising params if ext adv available */
7773 if (ext_adv_capable(hdev)) {
7774 hci_req_init(&req, hdev);
7775 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7777 /* Updating parameters of an active instance will return a
7778 * Command Disallowed error, so we must first disable the
7779 * instance if it is active.
7781 if (!adv_instance->pending)
7782 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7784 __hci_req_setup_ext_adv_instance(&req, cp->instance);
7786 err = hci_req_run(&req, add_ext_adv_params_complete);
7788 if (!err)
7789 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
7790 hdev, data, data_len);
7791 if (!cmd) {
7792 err = -ENOMEM;
7793 hci_remove_adv_instance(hdev, cp->instance);
7794 goto unlock;
7797 } else {
7798 rp.instance = cp->instance;
7799 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
7800 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7801 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7802 err = mgmt_cmd_complete(sk, hdev->id,
7803 MGMT_OP_ADD_EXT_ADV_PARAMS,
7804 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7807 unlock:
7808 hci_dev_unlock(hdev);
7810 return err;
7813 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
7814 u16 data_len)
7816 struct mgmt_cp_add_ext_adv_data *cp = data;
7817 struct mgmt_rp_add_ext_adv_data rp;
7818 u8 schedule_instance = 0;
7819 struct adv_info *next_instance;
7820 struct adv_info *adv_instance;
7821 int err = 0;
7822 struct mgmt_pending_cmd *cmd;
7823 struct hci_request req;
7825 BT_DBG("%s", hdev->name);
7827 hci_dev_lock(hdev);
7829 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7831 if (!adv_instance) {
7832 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
7833 MGMT_STATUS_INVALID_PARAMS);
7834 goto unlock;
7837 /* In new interface, we require that we are powered to register */
7838 if (!hdev_is_powered(hdev)) {
7839 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
7840 MGMT_STATUS_REJECTED);
7841 goto clear_new_instance;
7844 if (adv_busy(hdev)) {
7845 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
7846 MGMT_STATUS_BUSY);
7847 goto clear_new_instance;
7850 /* Validate new data */
7851 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
7852 cp->adv_data_len, true) ||
7853 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
7854 cp->adv_data_len, cp->scan_rsp_len, false)) {
7855 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
7856 MGMT_STATUS_INVALID_PARAMS);
7857 goto clear_new_instance;
7860 /* Set the data in the advertising instance */
7861 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
7862 cp->data, cp->scan_rsp_len,
7863 cp->data + cp->adv_data_len);
7865 /* We're good to go, update advertising data, parameters, and start
7866 * advertising.
7869 hci_req_init(&req, hdev);
7871 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
7873 if (ext_adv_capable(hdev)) {
7874 __hci_req_update_adv_data(&req, cp->instance);
7875 __hci_req_update_scan_rsp_data(&req, cp->instance);
7876 __hci_req_enable_ext_advertising(&req, cp->instance);
7878 } else {
7879 /* If using software rotation, determine next instance to use */
7881 if (hdev->cur_adv_instance == cp->instance) {
7882 /* If the currently advertised instance is being changed
7883 * then cancel the current advertising and schedule the
7884 * next instance. If there is only one instance then the
7885 * overridden advertising data will be visible right
7886 * away
7888 cancel_adv_timeout(hdev);
7890 next_instance = hci_get_next_instance(hdev,
7891 cp->instance);
7892 if (next_instance)
7893 schedule_instance = next_instance->instance;
7894 } else if (!hdev->adv_instance_timeout) {
7895 /* Immediately advertise the new instance if no other
7896 * instance is currently being advertised.
7898 schedule_instance = cp->instance;
7901 /* If the HCI_ADVERTISING flag is set or there is no instance to
7902 * be advertised then we have no HCI communication to make.
7903 * Simply return.
7905 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7906 !schedule_instance) {
7907 if (adv_instance->pending) {
7908 mgmt_advertising_added(sk, hdev, cp->instance);
7909 adv_instance->pending = false;
7911 rp.instance = cp->instance;
7912 err = mgmt_cmd_complete(sk, hdev->id,
7913 MGMT_OP_ADD_EXT_ADV_DATA,
7914 MGMT_STATUS_SUCCESS, &rp,
7915 sizeof(rp));
7916 goto unlock;
7919 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
7920 true);
7923 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
7924 data_len);
7925 if (!cmd) {
7926 err = -ENOMEM;
7927 goto clear_new_instance;
7930 if (!err)
7931 err = hci_req_run(&req, add_advertising_complete);
7933 if (err < 0) {
7934 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
7935 MGMT_STATUS_FAILED);
7936 mgmt_pending_remove(cmd);
7937 goto clear_new_instance;
7940 /* We were successful in updating data, so trigger advertising_added
7941 * event if this is an instance that wasn't previously advertising. If
7942 * a failure occurs in the requests we initiated, we will remove the
7943 * instance again in add_advertising_complete
7945 if (adv_instance->pending)
7946 mgmt_advertising_added(sk, hdev, cp->instance);
7948 goto unlock;
7950 clear_new_instance:
7951 hci_remove_adv_instance(hdev, cp->instance);
7953 unlock:
7954 hci_dev_unlock(hdev);
7956 return err;
7959 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7960 u16 opcode)
7962 struct mgmt_pending_cmd *cmd;
7963 struct mgmt_cp_remove_advertising *cp;
7964 struct mgmt_rp_remove_advertising rp;
7966 bt_dev_dbg(hdev, "status %d", status);
7968 hci_dev_lock(hdev);
7970 /* A failure status here only means that we failed to disable
7971 * advertising. Otherwise, the advertising instance has been removed,
7972 * so report success.
7974 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7975 if (!cmd)
7976 goto unlock;
7978 cp = cmd->param;
7979 rp.instance = cp->instance;
7981 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7982 &rp, sizeof(rp));
7983 mgmt_pending_remove(cmd);
7985 unlock:
7986 hci_dev_unlock(hdev);
7989 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7990 void *data, u16 data_len)
7992 struct mgmt_cp_remove_advertising *cp = data;
7993 struct mgmt_rp_remove_advertising rp;
7994 struct mgmt_pending_cmd *cmd;
7995 struct hci_request req;
7996 int err;
7998 bt_dev_dbg(hdev, "sock %p", sk);
8000 /* Enabling the experimental LL Privay support disables support for
8001 * advertising.
8003 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8004 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8005 MGMT_STATUS_NOT_SUPPORTED);
8007 hci_dev_lock(hdev);
8009 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8010 err = mgmt_cmd_status(sk, hdev->id,
8011 MGMT_OP_REMOVE_ADVERTISING,
8012 MGMT_STATUS_INVALID_PARAMS);
8013 goto unlock;
8016 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8017 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8018 pending_find(MGMT_OP_SET_LE, hdev)) {
8019 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8020 MGMT_STATUS_BUSY);
8021 goto unlock;
8024 if (list_empty(&hdev->adv_instances)) {
8025 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8026 MGMT_STATUS_INVALID_PARAMS);
8027 goto unlock;
8030 hci_req_init(&req, hdev);
8032 /* If we use extended advertising, instance is disabled and removed */
8033 if (ext_adv_capable(hdev)) {
8034 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8035 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8038 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8040 if (list_empty(&hdev->adv_instances))
8041 __hci_req_disable_advertising(&req);
8043 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8044 * flag is set or the device isn't powered then we have no HCI
8045 * communication to make. Simply return.
8047 if (skb_queue_empty(&req.cmd_q) ||
8048 !hdev_is_powered(hdev) ||
8049 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8050 hci_req_purge(&req);
8051 rp.instance = cp->instance;
8052 err = mgmt_cmd_complete(sk, hdev->id,
8053 MGMT_OP_REMOVE_ADVERTISING,
8054 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8055 goto unlock;
8058 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8059 data_len);
8060 if (!cmd) {
8061 err = -ENOMEM;
8062 goto unlock;
8065 err = hci_req_run(&req, remove_advertising_complete);
8066 if (err < 0)
8067 mgmt_pending_remove(cmd);
8069 unlock:
8070 hci_dev_unlock(hdev);
8072 return err;
8075 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8076 void *data, u16 data_len)
8078 struct mgmt_cp_get_adv_size_info *cp = data;
8079 struct mgmt_rp_get_adv_size_info rp;
8080 u32 flags, supported_flags;
8081 int err;
8083 bt_dev_dbg(hdev, "sock %p", sk);
8085 if (!lmp_le_capable(hdev))
8086 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8087 MGMT_STATUS_REJECTED);
8089 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8090 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8091 MGMT_STATUS_INVALID_PARAMS);
8093 flags = __le32_to_cpu(cp->flags);
8095 /* The current implementation only supports a subset of the specified
8096 * flags.
8098 supported_flags = get_supported_adv_flags(hdev);
8099 if (flags & ~supported_flags)
8100 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8101 MGMT_STATUS_INVALID_PARAMS);
8103 rp.instance = cp->instance;
8104 rp.flags = cp->flags;
8105 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8106 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8108 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8109 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8111 return err;
8114 static const struct hci_mgmt_handler mgmt_handlers[] = {
8115 { NULL }, /* 0x0000 (no command) */
8116 { read_version, MGMT_READ_VERSION_SIZE,
8117 HCI_MGMT_NO_HDEV |
8118 HCI_MGMT_UNTRUSTED },
8119 { read_commands, MGMT_READ_COMMANDS_SIZE,
8120 HCI_MGMT_NO_HDEV |
8121 HCI_MGMT_UNTRUSTED },
8122 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8123 HCI_MGMT_NO_HDEV |
8124 HCI_MGMT_UNTRUSTED },
8125 { read_controller_info, MGMT_READ_INFO_SIZE,
8126 HCI_MGMT_UNTRUSTED },
8127 { set_powered, MGMT_SETTING_SIZE },
8128 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8129 { set_connectable, MGMT_SETTING_SIZE },
8130 { set_fast_connectable, MGMT_SETTING_SIZE },
8131 { set_bondable, MGMT_SETTING_SIZE },
8132 { set_link_security, MGMT_SETTING_SIZE },
8133 { set_ssp, MGMT_SETTING_SIZE },
8134 { set_hs, MGMT_SETTING_SIZE },
8135 { set_le, MGMT_SETTING_SIZE },
8136 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8137 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8138 { add_uuid, MGMT_ADD_UUID_SIZE },
8139 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8140 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8141 HCI_MGMT_VAR_LEN },
8142 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8143 HCI_MGMT_VAR_LEN },
8144 { disconnect, MGMT_DISCONNECT_SIZE },
8145 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8146 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8147 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8148 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8149 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8150 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8151 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8152 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8153 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8154 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8155 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8156 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8157 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8158 HCI_MGMT_VAR_LEN },
8159 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8160 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8161 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8162 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8163 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8164 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8165 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8166 { set_advertising, MGMT_SETTING_SIZE },
8167 { set_bredr, MGMT_SETTING_SIZE },
8168 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8169 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8170 { set_secure_conn, MGMT_SETTING_SIZE },
8171 { set_debug_keys, MGMT_SETTING_SIZE },
8172 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8173 { load_irks, MGMT_LOAD_IRKS_SIZE,
8174 HCI_MGMT_VAR_LEN },
8175 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8176 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8177 { add_device, MGMT_ADD_DEVICE_SIZE },
8178 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8179 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8180 HCI_MGMT_VAR_LEN },
8181 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8182 HCI_MGMT_NO_HDEV |
8183 HCI_MGMT_UNTRUSTED },
8184 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8185 HCI_MGMT_UNCONFIGURED |
8186 HCI_MGMT_UNTRUSTED },
8187 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8188 HCI_MGMT_UNCONFIGURED },
8189 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8190 HCI_MGMT_UNCONFIGURED },
8191 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8192 HCI_MGMT_VAR_LEN },
8193 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8194 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8195 HCI_MGMT_NO_HDEV |
8196 HCI_MGMT_UNTRUSTED },
8197 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8198 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8199 HCI_MGMT_VAR_LEN },
8200 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8201 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8202 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8203 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8204 HCI_MGMT_UNTRUSTED },
8205 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8206 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8207 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8208 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8209 HCI_MGMT_VAR_LEN },
8210 { set_wideband_speech, MGMT_SETTING_SIZE },
8211 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8212 HCI_MGMT_UNTRUSTED },
8213 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8214 HCI_MGMT_UNTRUSTED |
8215 HCI_MGMT_HDEV_OPTIONAL },
8216 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8217 HCI_MGMT_VAR_LEN |
8218 HCI_MGMT_HDEV_OPTIONAL },
8219 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8220 HCI_MGMT_UNTRUSTED },
8221 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8222 HCI_MGMT_VAR_LEN },
8223 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8224 HCI_MGMT_UNTRUSTED },
8225 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8226 HCI_MGMT_VAR_LEN },
8227 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8228 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8229 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8230 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8231 HCI_MGMT_VAR_LEN },
8232 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8233 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8234 HCI_MGMT_VAR_LEN },
8235 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8236 HCI_MGMT_VAR_LEN },
8239 void mgmt_index_added(struct hci_dev *hdev)
8241 struct mgmt_ev_ext_index ev;
8243 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8244 return;
8246 switch (hdev->dev_type) {
8247 case HCI_PRIMARY:
8248 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8249 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8250 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8251 ev.type = 0x01;
8252 } else {
8253 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8254 HCI_MGMT_INDEX_EVENTS);
8255 ev.type = 0x00;
8257 break;
8258 case HCI_AMP:
8259 ev.type = 0x02;
8260 break;
8261 default:
8262 return;
8265 ev.bus = hdev->bus;
8267 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8268 HCI_MGMT_EXT_INDEX_EVENTS);
8271 void mgmt_index_removed(struct hci_dev *hdev)
8273 struct mgmt_ev_ext_index ev;
8274 u8 status = MGMT_STATUS_INVALID_INDEX;
8276 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8277 return;
8279 switch (hdev->dev_type) {
8280 case HCI_PRIMARY:
8281 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8283 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8284 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8285 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8286 ev.type = 0x01;
8287 } else {
8288 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8289 HCI_MGMT_INDEX_EVENTS);
8290 ev.type = 0x00;
8292 break;
8293 case HCI_AMP:
8294 ev.type = 0x02;
8295 break;
8296 default:
8297 return;
8300 ev.bus = hdev->bus;
8302 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8303 HCI_MGMT_EXT_INDEX_EVENTS);
8306 /* This function requires the caller holds hdev->lock */
8307 static void restart_le_actions(struct hci_dev *hdev)
8309 struct hci_conn_params *p;
8311 list_for_each_entry(p, &hdev->le_conn_params, list) {
8312 /* Needed for AUTO_OFF case where might not "really"
8313 * have been powered off.
8315 list_del_init(&p->action);
8317 switch (p->auto_connect) {
8318 case HCI_AUTO_CONN_DIRECT:
8319 case HCI_AUTO_CONN_ALWAYS:
8320 list_add(&p->action, &hdev->pend_le_conns);
8321 break;
8322 case HCI_AUTO_CONN_REPORT:
8323 list_add(&p->action, &hdev->pend_le_reports);
8324 break;
8325 default:
8326 break;
8331 void mgmt_power_on(struct hci_dev *hdev, int err)
8333 struct cmd_lookup match = { NULL, hdev };
8335 bt_dev_dbg(hdev, "err %d", err);
8337 hci_dev_lock(hdev);
8339 if (!err) {
8340 restart_le_actions(hdev);
8341 hci_update_background_scan(hdev);
8344 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8346 new_settings(hdev, match.sk);
8348 if (match.sk)
8349 sock_put(match.sk);
8351 hci_dev_unlock(hdev);
8354 void __mgmt_power_off(struct hci_dev *hdev)
8356 struct cmd_lookup match = { NULL, hdev };
8357 u8 status, zero_cod[] = { 0, 0, 0 };
8359 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8361 /* If the power off is because of hdev unregistration let
8362 * use the appropriate INVALID_INDEX status. Otherwise use
8363 * NOT_POWERED. We cover both scenarios here since later in
8364 * mgmt_index_removed() any hci_conn callbacks will have already
8365 * been triggered, potentially causing misleading DISCONNECTED
8366 * status responses.
8368 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8369 status = MGMT_STATUS_INVALID_INDEX;
8370 else
8371 status = MGMT_STATUS_NOT_POWERED;
8373 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8375 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8376 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8377 zero_cod, sizeof(zero_cod),
8378 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8379 ext_info_changed(hdev, NULL);
8382 new_settings(hdev, match.sk);
8384 if (match.sk)
8385 sock_put(match.sk);
8388 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8390 struct mgmt_pending_cmd *cmd;
8391 u8 status;
8393 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8394 if (!cmd)
8395 return;
8397 if (err == -ERFKILL)
8398 status = MGMT_STATUS_RFKILLED;
8399 else
8400 status = MGMT_STATUS_FAILED;
8402 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8404 mgmt_pending_remove(cmd);
8407 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8408 bool persistent)
8410 struct mgmt_ev_new_link_key ev;
8412 memset(&ev, 0, sizeof(ev));
8414 ev.store_hint = persistent;
8415 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8416 ev.key.addr.type = BDADDR_BREDR;
8417 ev.key.type = key->type;
8418 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8419 ev.key.pin_len = key->pin_len;
8421 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8424 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8426 switch (ltk->type) {
8427 case SMP_LTK:
8428 case SMP_LTK_SLAVE:
8429 if (ltk->authenticated)
8430 return MGMT_LTK_AUTHENTICATED;
8431 return MGMT_LTK_UNAUTHENTICATED;
8432 case SMP_LTK_P256:
8433 if (ltk->authenticated)
8434 return MGMT_LTK_P256_AUTH;
8435 return MGMT_LTK_P256_UNAUTH;
8436 case SMP_LTK_P256_DEBUG:
8437 return MGMT_LTK_P256_DEBUG;
8440 return MGMT_LTK_UNAUTHENTICATED;
8443 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8445 struct mgmt_ev_new_long_term_key ev;
8447 memset(&ev, 0, sizeof(ev));
8449 /* Devices using resolvable or non-resolvable random addresses
8450 * without providing an identity resolving key don't require
8451 * to store long term keys. Their addresses will change the
8452 * next time around.
8454 * Only when a remote device provides an identity address
8455 * make sure the long term key is stored. If the remote
8456 * identity is known, the long term keys are internally
8457 * mapped to the identity address. So allow static random
8458 * and public addresses here.
8460 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8461 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8462 ev.store_hint = 0x00;
8463 else
8464 ev.store_hint = persistent;
8466 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8467 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8468 ev.key.type = mgmt_ltk_type(key);
8469 ev.key.enc_size = key->enc_size;
8470 ev.key.ediv = key->ediv;
8471 ev.key.rand = key->rand;
8473 if (key->type == SMP_LTK)
8474 ev.key.master = 1;
8476 /* Make sure we copy only the significant bytes based on the
8477 * encryption key size, and set the rest of the value to zeroes.
8479 memcpy(ev.key.val, key->val, key->enc_size);
8480 memset(ev.key.val + key->enc_size, 0,
8481 sizeof(ev.key.val) - key->enc_size);
8483 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
8486 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8488 struct mgmt_ev_new_irk ev;
8490 memset(&ev, 0, sizeof(ev));
8492 ev.store_hint = persistent;
8494 bacpy(&ev.rpa, &irk->rpa);
8495 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8496 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8497 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8499 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8502 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8503 bool persistent)
8505 struct mgmt_ev_new_csrk ev;
8507 memset(&ev, 0, sizeof(ev));
8509 /* Devices using resolvable or non-resolvable random addresses
8510 * without providing an identity resolving key don't require
8511 * to store signature resolving keys. Their addresses will change
8512 * the next time around.
8514 * Only when a remote device provides an identity address
8515 * make sure the signature resolving key is stored. So allow
8516 * static random and public addresses here.
8518 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8519 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8520 ev.store_hint = 0x00;
8521 else
8522 ev.store_hint = persistent;
8524 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8525 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8526 ev.key.type = csrk->type;
8527 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8529 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
8532 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8533 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8534 u16 max_interval, u16 latency, u16 timeout)
8536 struct mgmt_ev_new_conn_param ev;
8538 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8539 return;
8541 memset(&ev, 0, sizeof(ev));
8542 bacpy(&ev.addr.bdaddr, bdaddr);
8543 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8544 ev.store_hint = store_hint;
8545 ev.min_interval = cpu_to_le16(min_interval);
8546 ev.max_interval = cpu_to_le16(max_interval);
8547 ev.latency = cpu_to_le16(latency);
8548 ev.timeout = cpu_to_le16(timeout);
8550 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8553 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8554 u32 flags, u8 *name, u8 name_len)
8556 char buf[512];
8557 struct mgmt_ev_device_connected *ev = (void *) buf;
8558 u16 eir_len = 0;
8560 bacpy(&ev->addr.bdaddr, &conn->dst);
8561 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8563 ev->flags = __cpu_to_le32(flags);
8565 /* We must ensure that the EIR Data fields are ordered and
8566 * unique. Keep it simple for now and avoid the problem by not
8567 * adding any BR/EDR data to the LE adv.
8569 if (conn->le_adv_data_len > 0) {
8570 memcpy(&ev->eir[eir_len],
8571 conn->le_adv_data, conn->le_adv_data_len);
8572 eir_len = conn->le_adv_data_len;
8573 } else {
8574 if (name_len > 0)
8575 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
8576 name, name_len);
8578 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8579 eir_len = eir_append_data(ev->eir, eir_len,
8580 EIR_CLASS_OF_DEV,
8581 conn->dev_class, 3);
8584 ev->eir_len = cpu_to_le16(eir_len);
8586 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8587 sizeof(*ev) + eir_len, NULL);
8590 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8592 struct sock **sk = data;
8594 cmd->cmd_complete(cmd, 0);
8596 *sk = cmd->sk;
8597 sock_hold(*sk);
8599 mgmt_pending_remove(cmd);
8602 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8604 struct hci_dev *hdev = data;
8605 struct mgmt_cp_unpair_device *cp = cmd->param;
8607 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8609 cmd->cmd_complete(cmd, 0);
8610 mgmt_pending_remove(cmd);
8613 bool mgmt_powering_down(struct hci_dev *hdev)
8615 struct mgmt_pending_cmd *cmd;
8616 struct mgmt_mode *cp;
8618 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8619 if (!cmd)
8620 return false;
8622 cp = cmd->param;
8623 if (!cp->val)
8624 return true;
8626 return false;
8629 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8630 u8 link_type, u8 addr_type, u8 reason,
8631 bool mgmt_connected)
8633 struct mgmt_ev_device_disconnected ev;
8634 struct sock *sk = NULL;
8636 /* The connection is still in hci_conn_hash so test for 1
8637 * instead of 0 to know if this is the last one.
8639 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8640 cancel_delayed_work(&hdev->power_off);
8641 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8644 if (!mgmt_connected)
8645 return;
8647 if (link_type != ACL_LINK && link_type != LE_LINK)
8648 return;
8650 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8652 bacpy(&ev.addr.bdaddr, bdaddr);
8653 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8654 ev.reason = reason;
8656 /* Report disconnects due to suspend */
8657 if (hdev->suspended)
8658 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
8660 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8662 if (sk)
8663 sock_put(sk);
8665 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8666 hdev);
8669 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8670 u8 link_type, u8 addr_type, u8 status)
8672 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8673 struct mgmt_cp_disconnect *cp;
8674 struct mgmt_pending_cmd *cmd;
8676 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8677 hdev);
8679 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8680 if (!cmd)
8681 return;
8683 cp = cmd->param;
8685 if (bacmp(bdaddr, &cp->addr.bdaddr))
8686 return;
8688 if (cp->addr.type != bdaddr_type)
8689 return;
8691 cmd->cmd_complete(cmd, mgmt_status(status));
8692 mgmt_pending_remove(cmd);
8695 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8696 u8 addr_type, u8 status)
8698 struct mgmt_ev_connect_failed ev;
8700 /* The connection is still in hci_conn_hash so test for 1
8701 * instead of 0 to know if this is the last one.
8703 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8704 cancel_delayed_work(&hdev->power_off);
8705 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8708 bacpy(&ev.addr.bdaddr, bdaddr);
8709 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8710 ev.status = mgmt_status(status);
8712 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8715 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8717 struct mgmt_ev_pin_code_request ev;
8719 bacpy(&ev.addr.bdaddr, bdaddr);
8720 ev.addr.type = BDADDR_BREDR;
8721 ev.secure = secure;
8723 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8726 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8727 u8 status)
8729 struct mgmt_pending_cmd *cmd;
8731 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8732 if (!cmd)
8733 return;
8735 cmd->cmd_complete(cmd, mgmt_status(status));
8736 mgmt_pending_remove(cmd);
8739 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8740 u8 status)
8742 struct mgmt_pending_cmd *cmd;
8744 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8745 if (!cmd)
8746 return;
8748 cmd->cmd_complete(cmd, mgmt_status(status));
8749 mgmt_pending_remove(cmd);
8752 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8753 u8 link_type, u8 addr_type, u32 value,
8754 u8 confirm_hint)
8756 struct mgmt_ev_user_confirm_request ev;
8758 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8760 bacpy(&ev.addr.bdaddr, bdaddr);
8761 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8762 ev.confirm_hint = confirm_hint;
8763 ev.value = cpu_to_le32(value);
8765 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8766 NULL);
8769 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8770 u8 link_type, u8 addr_type)
8772 struct mgmt_ev_user_passkey_request ev;
8774 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8776 bacpy(&ev.addr.bdaddr, bdaddr);
8777 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8779 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8780 NULL);
8783 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8784 u8 link_type, u8 addr_type, u8 status,
8785 u8 opcode)
8787 struct mgmt_pending_cmd *cmd;
8789 cmd = pending_find(opcode, hdev);
8790 if (!cmd)
8791 return -ENOENT;
8793 cmd->cmd_complete(cmd, mgmt_status(status));
8794 mgmt_pending_remove(cmd);
8796 return 0;
8799 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8800 u8 link_type, u8 addr_type, u8 status)
8802 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8803 status, MGMT_OP_USER_CONFIRM_REPLY);
8806 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8807 u8 link_type, u8 addr_type, u8 status)
8809 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8810 status,
8811 MGMT_OP_USER_CONFIRM_NEG_REPLY);
8814 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8815 u8 link_type, u8 addr_type, u8 status)
8817 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8818 status, MGMT_OP_USER_PASSKEY_REPLY);
8821 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8822 u8 link_type, u8 addr_type, u8 status)
8824 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8825 status,
8826 MGMT_OP_USER_PASSKEY_NEG_REPLY);
8829 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8830 u8 link_type, u8 addr_type, u32 passkey,
8831 u8 entered)
8833 struct mgmt_ev_passkey_notify ev;
8835 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8837 bacpy(&ev.addr.bdaddr, bdaddr);
8838 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8839 ev.passkey = __cpu_to_le32(passkey);
8840 ev.entered = entered;
8842 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8845 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
8847 struct mgmt_ev_auth_failed ev;
8848 struct mgmt_pending_cmd *cmd;
8849 u8 status = mgmt_status(hci_status);
8851 bacpy(&ev.addr.bdaddr, &conn->dst);
8852 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8853 ev.status = status;
8855 cmd = find_pairing(conn);
8857 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
8858 cmd ? cmd->sk : NULL);
8860 if (cmd) {
8861 cmd->cmd_complete(cmd, status);
8862 mgmt_pending_remove(cmd);
8866 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
8868 struct cmd_lookup match = { NULL, hdev };
8869 bool changed;
8871 if (status) {
8872 u8 mgmt_err = mgmt_status(status);
8873 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8874 cmd_status_rsp, &mgmt_err);
8875 return;
8878 if (test_bit(HCI_AUTH, &hdev->flags))
8879 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8880 else
8881 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8883 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8884 &match);
8886 if (changed)
8887 new_settings(hdev, match.sk);
8889 if (match.sk)
8890 sock_put(match.sk);
8893 static void clear_eir(struct hci_request *req)
8895 struct hci_dev *hdev = req->hdev;
8896 struct hci_cp_write_eir cp;
8898 if (!lmp_ext_inq_capable(hdev))
8899 return;
8901 memset(hdev->eir, 0, sizeof(hdev->eir));
8903 memset(&cp, 0, sizeof(cp));
8905 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8908 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8910 struct cmd_lookup match = { NULL, hdev };
8911 struct hci_request req;
8912 bool changed = false;
8914 if (status) {
8915 u8 mgmt_err = mgmt_status(status);
8917 if (enable && hci_dev_test_and_clear_flag(hdev,
8918 HCI_SSP_ENABLED)) {
8919 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8920 new_settings(hdev, NULL);
8923 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
8924 &mgmt_err);
8925 return;
8928 if (enable) {
8929 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8930 } else {
8931 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8932 if (!changed)
8933 changed = hci_dev_test_and_clear_flag(hdev,
8934 HCI_HS_ENABLED);
8935 else
8936 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8939 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8941 if (changed)
8942 new_settings(hdev, match.sk);
8944 if (match.sk)
8945 sock_put(match.sk);
8947 hci_req_init(&req, hdev);
8949 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8950 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8951 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8952 sizeof(enable), &enable);
8953 __hci_req_update_eir(&req);
8954 } else {
8955 clear_eir(&req);
8958 hci_req_run(&req, NULL);
8961 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8963 struct cmd_lookup *match = data;
8965 if (match->sk == NULL) {
8966 match->sk = cmd->sk;
8967 sock_hold(match->sk);
8971 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8972 u8 status)
8974 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8976 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8977 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8978 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8980 if (!status) {
8981 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
8982 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8983 ext_info_changed(hdev, NULL);
8986 if (match.sk)
8987 sock_put(match.sk);
8990 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8992 struct mgmt_cp_set_local_name ev;
8993 struct mgmt_pending_cmd *cmd;
8995 if (status)
8996 return;
8998 memset(&ev, 0, sizeof(ev));
8999 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9000 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9002 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9003 if (!cmd) {
9004 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9006 /* If this is a HCI command related to powering on the
9007 * HCI dev don't send any mgmt signals.
9009 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9010 return;
9013 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9014 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9015 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9018 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9020 int i;
9022 for (i = 0; i < uuid_count; i++) {
9023 if (!memcmp(uuid, uuids[i], 16))
9024 return true;
9027 return false;
9030 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9032 u16 parsed = 0;
9034 while (parsed < eir_len) {
9035 u8 field_len = eir[0];
9036 u8 uuid[16];
9037 int i;
9039 if (field_len == 0)
9040 break;
9042 if (eir_len - parsed < field_len + 1)
9043 break;
9045 switch (eir[1]) {
9046 case EIR_UUID16_ALL:
9047 case EIR_UUID16_SOME:
9048 for (i = 0; i + 3 <= field_len; i += 2) {
9049 memcpy(uuid, bluetooth_base_uuid, 16);
9050 uuid[13] = eir[i + 3];
9051 uuid[12] = eir[i + 2];
9052 if (has_uuid(uuid, uuid_count, uuids))
9053 return true;
9055 break;
9056 case EIR_UUID32_ALL:
9057 case EIR_UUID32_SOME:
9058 for (i = 0; i + 5 <= field_len; i += 4) {
9059 memcpy(uuid, bluetooth_base_uuid, 16);
9060 uuid[15] = eir[i + 5];
9061 uuid[14] = eir[i + 4];
9062 uuid[13] = eir[i + 3];
9063 uuid[12] = eir[i + 2];
9064 if (has_uuid(uuid, uuid_count, uuids))
9065 return true;
9067 break;
9068 case EIR_UUID128_ALL:
9069 case EIR_UUID128_SOME:
9070 for (i = 0; i + 17 <= field_len; i += 16) {
9071 memcpy(uuid, eir + i + 2, 16);
9072 if (has_uuid(uuid, uuid_count, uuids))
9073 return true;
9075 break;
9078 parsed += field_len + 1;
9079 eir += field_len + 1;
9082 return false;
9085 static void restart_le_scan(struct hci_dev *hdev)
9087 /* If controller is not scanning we are done. */
9088 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9089 return;
9091 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9092 hdev->discovery.scan_start +
9093 hdev->discovery.scan_duration))
9094 return;
9096 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9097 DISCOV_LE_RESTART_DELAY);
9100 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9101 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9103 /* If a RSSI threshold has been specified, and
9104 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9105 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9106 * is set, let it through for further processing, as we might need to
9107 * restart the scan.
9109 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9110 * the results are also dropped.
9112 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9113 (rssi == HCI_RSSI_INVALID ||
9114 (rssi < hdev->discovery.rssi &&
9115 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9116 return false;
9118 if (hdev->discovery.uuid_count != 0) {
9119 /* If a list of UUIDs is provided in filter, results with no
9120 * matching UUID should be dropped.
9122 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9123 hdev->discovery.uuids) &&
9124 !eir_has_uuids(scan_rsp, scan_rsp_len,
9125 hdev->discovery.uuid_count,
9126 hdev->discovery.uuids))
9127 return false;
9130 /* If duplicate filtering does not report RSSI changes, then restart
9131 * scanning to ensure updated result with updated RSSI values.
9133 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9134 restart_le_scan(hdev);
9136 /* Validate RSSI value against the RSSI threshold once more. */
9137 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9138 rssi < hdev->discovery.rssi)
9139 return false;
9142 return true;
9145 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9146 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9147 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9149 char buf[512];
9150 struct mgmt_ev_device_found *ev = (void *)buf;
9151 size_t ev_size;
9153 /* Don't send events for a non-kernel initiated discovery. With
9154 * LE one exception is if we have pend_le_reports > 0 in which
9155 * case we're doing passive scanning and want these events.
9157 if (!hci_discovery_active(hdev)) {
9158 if (link_type == ACL_LINK)
9159 return;
9160 if (link_type == LE_LINK &&
9161 list_empty(&hdev->pend_le_reports) &&
9162 !hci_is_adv_monitoring(hdev)) {
9163 return;
9167 if (hdev->discovery.result_filtering) {
9168 /* We are using service discovery */
9169 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9170 scan_rsp_len))
9171 return;
9174 if (hdev->discovery.limited) {
9175 /* Check for limited discoverable bit */
9176 if (dev_class) {
9177 if (!(dev_class[1] & 0x20))
9178 return;
9179 } else {
9180 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9181 if (!flags || !(flags[0] & LE_AD_LIMITED))
9182 return;
9186 /* Make sure that the buffer is big enough. The 5 extra bytes
9187 * are for the potential CoD field.
9189 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9190 return;
9192 memset(buf, 0, sizeof(buf));
9194 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9195 * RSSI value was reported as 0 when not available. This behavior
9196 * is kept when using device discovery. This is required for full
9197 * backwards compatibility with the API.
9199 * However when using service discovery, the value 127 will be
9200 * returned when the RSSI is not available.
9202 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9203 link_type == ACL_LINK)
9204 rssi = 0;
9206 bacpy(&ev->addr.bdaddr, bdaddr);
9207 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9208 ev->rssi = rssi;
9209 ev->flags = cpu_to_le32(flags);
9211 if (eir_len > 0)
9212 /* Copy EIR or advertising data into event */
9213 memcpy(ev->eir, eir, eir_len);
9215 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9216 NULL))
9217 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9218 dev_class, 3);
9220 if (scan_rsp_len > 0)
9221 /* Append scan response data to event */
9222 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9224 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9225 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9227 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9230 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9231 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9233 struct mgmt_ev_device_found *ev;
9234 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9235 u16 eir_len;
9237 ev = (struct mgmt_ev_device_found *) buf;
9239 memset(buf, 0, sizeof(buf));
9241 bacpy(&ev->addr.bdaddr, bdaddr);
9242 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9243 ev->rssi = rssi;
9245 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9246 name_len);
9248 ev->eir_len = cpu_to_le16(eir_len);
9250 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9253 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9255 struct mgmt_ev_discovering ev;
9257 bt_dev_dbg(hdev, "discovering %u", discovering);
9259 memset(&ev, 0, sizeof(ev));
9260 ev.type = hdev->discovery.type;
9261 ev.discovering = discovering;
9263 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9266 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9268 struct mgmt_ev_controller_suspend ev;
9270 ev.suspend_state = state;
9271 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9274 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9275 u8 addr_type)
9277 struct mgmt_ev_controller_resume ev;
9279 ev.wake_reason = reason;
9280 if (bdaddr) {
9281 bacpy(&ev.addr.bdaddr, bdaddr);
9282 ev.addr.type = addr_type;
9283 } else {
9284 memset(&ev.addr, 0, sizeof(ev.addr));
9287 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9290 static struct hci_mgmt_chan chan = {
9291 .channel = HCI_CHANNEL_CONTROL,
9292 .handler_count = ARRAY_SIZE(mgmt_handlers),
9293 .handlers = mgmt_handlers,
9294 .hdev_init = mgmt_init_hdev,
9297 int mgmt_init(void)
9299 return hci_mgmt_chan_register(&chan);
9302 void mgmt_exit(void)
9304 hci_mgmt_chan_unregister(&chan);