Linux 4.1.18
[linux/fpc-iii.git] / net / bluetooth / mgmt.c
blob58d60cbbc33f89e310681c666a0d480eebbc9d4f
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
/* Management interface version reported via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_READ_INFO,
46 MGMT_OP_SET_POWERED,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_BONDABLE,
51 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_SSP,
53 MGMT_OP_SET_HS,
54 MGMT_OP_SET_LE,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_ADD_UUID,
58 MGMT_OP_REMOVE_UUID,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_DISCONNECT,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_PAIR_DEVICE,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_CONFIRM_NAME,
79 MGMT_OP_BLOCK_DEVICE,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_BREDR,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_SET_PRIVACY,
89 MGMT_OP_LOAD_IRKS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_ADD_DEVICE,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
109 MGMT_EV_INDEX_ADDED,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
122 MGMT_EV_AUTH_FAILED,
123 MGMT_EV_DEVICE_FOUND,
124 MGMT_EV_DISCOVERING,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
129 MGMT_EV_NEW_IRK,
130 MGMT_EV_NEW_CSRK,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
146 MGMT_OP_READ_INFO,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
153 MGMT_EV_INDEX_ADDED,
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* How long cached discovery results stay valid (2 seconds in jiffies). */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 128-bit all-zero key, used to detect unset/dummy link keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
172 MGMT_STATUS_SUCCESS,
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
244 u16 len, int flag)
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
247 flag, NULL);
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
254 flag, skip_sk);
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
272 u16 data_len)
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
282 &rp, sizeof(rp));
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
286 u16 data_len)
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
290 size_t rp_size;
291 int i, err;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
298 } else {
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
306 if (!rp)
307 return -ENOMEM;
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
320 } else {
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
331 rp, rp_size);
332 kfree(rp);
334 return err;
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
338 u16 data_len)
340 struct mgmt_rp_read_index_list *rp;
341 struct hci_dev *d;
342 size_t rp_len;
343 u16 count;
344 int err;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
350 count = 0;
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
354 count++;
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
359 if (!rp) {
360 read_unlock(&hci_dev_list_lock);
361 return -ENOMEM;
364 count = 0;
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
369 continue;
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
375 continue;
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
390 0, rp, rp_len);
392 kfree(rp);
394 return err;
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
401 struct hci_dev *d;
402 size_t rp_len;
403 u16 count;
404 int err;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
410 count = 0;
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
414 count++;
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
419 if (!rp) {
420 read_unlock(&hci_dev_list_lock);
421 return -ENOMEM;
424 count = 0;
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
429 continue;
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
435 continue;
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
452 kfree(rp);
454 return err;
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
461 struct hci_dev *d;
462 size_t rp_len;
463 u16 count;
464 int err;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
470 count = 0;
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
473 count++;
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
478 if (!rp) {
479 read_unlock(&hci_dev_list_lock);
480 return -ENOMEM;
483 count = 0;
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
488 continue;
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
494 continue;
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
499 else
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
503 } else {
504 continue;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
528 kfree(rp);
530 return err;
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
537 return false;
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
541 return false;
543 return true;
546 static __le32 get_missing_options(struct hci_dev *hdev)
548 u32 options = 0;
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
574 sizeof(options));
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
581 u32 options = 0;
583 BT_DBG("sock %p %s", sk, hdev->name);
585 hci_dev_lock(hdev);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
602 &rp, sizeof(rp));
605 static u32 get_supported_settings(struct hci_dev *hdev)
607 u32 settings = 0;
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
639 hdev->set_bdaddr)
640 settings |= MGMT_SETTING_CONFIGURATION;
642 return settings;
645 static u32 get_current_settings(struct hci_dev *hdev)
647 u32 settings = 0;
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
701 * be evaluated.
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
710 return settings;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
720 if (len < 4)
721 return ptr;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
724 u16 uuid16;
726 if (uuid->size != 16)
727 continue;
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
730 if (uuid16 < 0x1100)
731 continue;
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
734 continue;
736 if (!uuids_start) {
737 uuids_start = ptr;
738 uuids_start[0] = 1;
739 uuids_start[1] = EIR_UUID16_ALL;
740 ptr += 2;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
746 break;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
754 return ptr;
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
762 if (len < 6)
763 return ptr;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
767 continue;
769 if (!uuids_start) {
770 uuids_start = ptr;
771 uuids_start[0] = 1;
772 uuids_start[1] = EIR_UUID32_ALL;
773 ptr += 2;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
779 break;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
783 ptr += sizeof(u32);
784 uuids_start[0] += sizeof(u32);
787 return ptr;
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
795 if (len < 18)
796 return ptr;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
800 continue;
802 if (!uuids_start) {
803 uuids_start = ptr;
804 uuids_start[0] = 1;
805 uuids_start[1] = EIR_UUID128_ALL;
806 ptr += 2;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
812 break;
815 memcpy(ptr, uuid->uuid, 16);
816 ptr += 16;
817 uuids_start[0] += 16;
820 return ptr;
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
830 const void *data)
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
837 u8 ad_len = 0;
838 size_t name_len;
840 name_len = strlen(hdev->dev_name);
841 if (name_len > 0) {
842 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
844 if (name_len > max_len) {
845 name_len = max_len;
846 ptr[1] = EIR_NAME_SHORT;
847 } else
848 ptr[1] = EIR_NAME_COMPLETE;
850 ptr[0] = name_len + 1;
852 memcpy(ptr + 2, hdev->dev_name, name_len);
854 ad_len += (name_len + 2);
855 ptr += (name_len + 2);
858 return ad_len;
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
863 /* TODO: Set the appropriate entries based on advertising instance flags
864 * here once flags other than 0 are supported.
866 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 hdev->adv_instance.scan_rsp_len);
869 return hdev->adv_instance.scan_rsp_len;
872 static void update_scan_rsp_data_for_instance(struct hci_request *req,
873 u8 instance)
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_scan_rsp_data cp;
877 u8 len;
879 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
880 return;
882 memset(&cp, 0, sizeof(cp));
884 if (instance)
885 len = create_instance_scan_rsp_data(hdev, cp.data);
886 else
887 len = create_default_scan_rsp_data(hdev, cp.data);
889 if (hdev->scan_rsp_data_len == len &&
890 !memcmp(cp.data, hdev->scan_rsp_data, len))
891 return;
893 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
894 hdev->scan_rsp_data_len = len;
896 cp.length = len;
898 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
901 static void update_scan_rsp_data(struct hci_request *req)
903 struct hci_dev *hdev = req->hdev;
904 u8 instance;
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
913 instance = 0x01;
914 else
915 instance = 0x00;
917 update_scan_rsp_data_for_instance(req, instance);
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
922 struct mgmt_pending_cmd *cmd;
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
928 if (cmd) {
929 struct mgmt_mode *cp = cmd->param;
930 if (cp->val == 0x01)
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
934 } else {
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
941 return 0;
944 static u8 get_current_adv_instance(struct hci_dev *hdev)
946 /* The "Set Advertising" setting supersedes the "Add Advertising"
947 * setting. Here we set the advertising data based on which
948 * setting was set. When neither apply, default to the global settings,
949 * represented by instance "0".
951 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
952 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
953 return 0x01;
955 return 0x00;
958 static bool get_connectable(struct hci_dev *hdev)
960 struct mgmt_pending_cmd *cmd;
962 /* If there's a pending mgmt command the flag will not yet have
963 * it's final value, so check for this first.
965 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
966 if (cmd) {
967 struct mgmt_mode *cp = cmd->param;
969 return cp->val;
972 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
977 u32 flags;
979 if (instance > 0x01)
980 return 0;
982 if (instance == 0x01)
983 return hdev->adv_instance.flags;
985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
988 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 * to the "connectable" instance flag.
991 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
992 flags |= MGMT_ADV_FLAG_CONNECTABLE;
994 return flags;
997 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
999 /* Ignore instance 0 and other unsupported instances */
1000 if (instance != 0x01)
1001 return 0;
1003 /* TODO: Take into account the "appearance" and "local-name" flags here.
1004 * These are currently being ignored as they are not supported.
1006 return hdev->adv_instance.scan_rsp_len;
1009 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1011 u8 ad_len = 0, flags = 0;
1012 u32 instance_flags = get_adv_instance_flags(hdev, instance);
1014 /* The Add Advertising command allows userspace to set both the general
1015 * and limited discoverable flags.
1017 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1018 flags |= LE_AD_GENERAL;
1020 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1021 flags |= LE_AD_LIMITED;
1023 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1024 /* If a discovery flag wasn't provided, simply use the global
1025 * settings.
1027 if (!flags)
1028 flags |= get_adv_discov_flags(hdev);
1030 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1031 flags |= LE_AD_NO_BREDR;
1033 /* If flags would still be empty, then there is no need to
1034 * include the "Flags" AD field".
1036 if (flags) {
1037 ptr[0] = 0x02;
1038 ptr[1] = EIR_FLAGS;
1039 ptr[2] = flags;
1041 ad_len += 3;
1042 ptr += 3;
1046 if (instance) {
1047 memcpy(ptr, hdev->adv_instance.adv_data,
1048 hdev->adv_instance.adv_data_len);
1050 ad_len += hdev->adv_instance.adv_data_len;
1051 ptr += hdev->adv_instance.adv_data_len;
1054 /* Provide Tx Power only if we can provide a valid value for it */
1055 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1056 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1057 ptr[0] = 0x02;
1058 ptr[1] = EIR_TX_POWER;
1059 ptr[2] = (u8)hdev->adv_tx_power;
1061 ad_len += 3;
1062 ptr += 3;
1065 return ad_len;
1068 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
1070 struct hci_dev *hdev = req->hdev;
1071 struct hci_cp_le_set_adv_data cp;
1072 u8 len;
1074 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1075 return;
1077 memset(&cp, 0, sizeof(cp));
1079 len = create_instance_adv_data(hdev, instance, cp.data);
1081 /* There's nothing to do if the data hasn't changed */
1082 if (hdev->adv_data_len == len &&
1083 memcmp(cp.data, hdev->adv_data, len) == 0)
1084 return;
1086 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1087 hdev->adv_data_len = len;
1089 cp.length = len;
1091 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1094 static void update_adv_data(struct hci_request *req)
1096 struct hci_dev *hdev = req->hdev;
1097 u8 instance = get_current_adv_instance(hdev);
1099 update_adv_data_for_instance(req, instance);
1102 int mgmt_update_adv_data(struct hci_dev *hdev)
1104 struct hci_request req;
1106 hci_req_init(&req, hdev);
1107 update_adv_data(&req);
1109 return hci_req_run(&req, NULL);
1112 static void create_eir(struct hci_dev *hdev, u8 *data)
1114 u8 *ptr = data;
1115 size_t name_len;
1117 name_len = strlen(hdev->dev_name);
1119 if (name_len > 0) {
1120 /* EIR Data type */
1121 if (name_len > 48) {
1122 name_len = 48;
1123 ptr[1] = EIR_NAME_SHORT;
1124 } else
1125 ptr[1] = EIR_NAME_COMPLETE;
1127 /* EIR Data length */
1128 ptr[0] = name_len + 1;
1130 memcpy(ptr + 2, hdev->dev_name, name_len);
1132 ptr += (name_len + 2);
1135 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1136 ptr[0] = 2;
1137 ptr[1] = EIR_TX_POWER;
1138 ptr[2] = (u8) hdev->inq_tx_power;
1140 ptr += 3;
1143 if (hdev->devid_source > 0) {
1144 ptr[0] = 9;
1145 ptr[1] = EIR_DEVICE_ID;
1147 put_unaligned_le16(hdev->devid_source, ptr + 2);
1148 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1149 put_unaligned_le16(hdev->devid_product, ptr + 6);
1150 put_unaligned_le16(hdev->devid_version, ptr + 8);
1152 ptr += 10;
1155 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1156 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1157 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command when the freshly built EIR data
 * differs from what was last written. No-op unless the adapter is
 * powered, EIR-capable, SSP is enabled and the service cache is not
 * active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the command entirely when nothing changed. */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
1189 static u8 get_service_classes(struct hci_dev *hdev)
1191 struct bt_uuid *uuid;
1192 u8 val = 0;
1194 list_for_each_entry(uuid, &hdev->uuids, list)
1195 val |= uuid->svc_hint;
1197 return val;
/* Queue an HCI Write Class of Device command if the computed class
 * (minor, major with the limited-discoverable bit folded in, service
 * classes) differs from the controller's current value.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited Discoverable bit lives in the major class octet. */
	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1229 static void disable_advertising(struct hci_request *req)
1231 u8 enable = 0x00;
1233 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the Set Advertising Parameters + Enable commands for the
 * current advertising instance. Bails out if any LE connection exists
 * or if a new random address cannot be programmed. Re-starts (disables
 * first) when advertising is already on.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
/* Delayed work: once the service-cache flag is cleared, re-sync the
 * EIR data and class of device with the controller in one request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only the instance that actually clears the flag does the work. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
/* Delayed work: mark the Resolvable Private Address as expired and,
 * if advertising is enabled, restart it so a fresh RPA gets written.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
/* One-time mgmt setup for a controller: mark it as mgmt-controlled,
 * wire up the deferred works and require explicit bondable opt-in.
 * Idempotent - later callers see HCI_MGMT already set and return.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
/* Handle MGMT_OP_READ_INFO: snapshot address, version, manufacturer,
 * class, names and settings of @hdev under the device lock and return
 * them to user space as a Read Info response.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1381 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1383 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1385 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1386 sizeof(settings));
1389 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1391 BT_DBG("%s status 0x%02x", hdev->name, status);
1393 if (hci_conn_count(hdev) == 0) {
1394 cancel_delayed_work(&hdev->power_off);
1395 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to abort the current discovery phase:
 * inquiry/LE-scan cancel while finding, remote-name-request cancel
 * while resolving, or a plain LE scan disable for passive scanning.
 *
 * Returns true when at least one command was queued (the caller is
 * then expected to move discovery to the STOPPING state).
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1442 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1443 u8 instance)
1445 struct mgmt_ev_advertising_added ev;
1447 ev.instance = instance;
1449 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1452 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1453 u8 instance)
1455 struct mgmt_ev_advertising_removed ev;
1457 ev.instance = instance;
1459 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Drop the configured advertising instance: cancel its timeout, wipe
 * its state, notify user space and - unless the regular advertising
 * setting keeps it going or the adapter is off - stop advertising.
 */
static void clear_adv_instance(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
	/* Only instance 1 exists in this kernel version. */
	advertising_removed(NULL, hdev, 1);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	disable_advertising(&req);
	hci_req_run(&req, NULL);
}
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, clear any advertising
 * instance, stop advertising and discovery, then disconnect, cancel
 * or reject every connection depending on its state.
 *
 * Returns the hci_req_run() result; -ENODATA means no commands were
 * queued at all.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hdev->adv_instance.timeout)
		clear_adv_instance(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection still being set up. */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting acceptance. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
/* Handle MGMT_OP_SET_POWERED: power the adapter up or down.
 *
 * Powering up is deferred to the power_on work. Powering down first
 * runs clean_up_hci_state() and arms a timeout in case the clean-up
 * commands never complete; -ENODATA (nothing to clean up) powers off
 * immediately.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* Adapter is already up; just claim it for mgmt. */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1613 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1615 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1617 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1618 sizeof(ev), skip);
1621 int mgmt_new_settings(struct hci_dev *hdev)
1623 return new_settings(hdev, NULL);
/* Context passed through mgmt_pending_foreach() callbacks that fold
 * multiple pending commands into one response and event.
 */
struct cmd_lookup {
	struct sock *sk;	/* first matched socket; ref taken in settings_rsp() */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
/* Foreach callback: answer a pending command with the current settings
 * and remember (holding a reference to) the first socket so the caller
 * can skip it when broadcasting New Settings afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* free, not remove: the list_del above already unlinked it */
	mgmt_pending_free(cmd);
}
1648 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1650 u8 *status = data;
1652 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1653 mgmt_pending_remove(cmd);
1656 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1658 if (cmd->cmd_complete) {
1659 u8 *status = data;
1661 cmd->cmd_complete(cmd, *status);
1662 mgmt_pending_remove(cmd);
1664 return;
1667 cmd_status_rsp(cmd, data);
1670 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1672 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1673 cmd->param, cmd->param_len);
1676 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1678 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1679 cmd->param, sizeof(struct mgmt_addr_info));
1682 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1684 if (!lmp_bredr_capable(hdev))
1685 return MGMT_STATUS_NOT_SUPPORTED;
1686 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1687 return MGMT_STATUS_REJECTED;
1688 else
1689 return MGMT_STATUS_SUCCESS;
1692 static u8 mgmt_le_support(struct hci_dev *hdev)
1694 if (!lmp_le_capable(hdev))
1695 return MGMT_STATUS_NOT_SUPPORTED;
1696 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1697 return MGMT_STATUS_REJECTED;
1698 else
1699 return MGMT_STATUS_SUCCESS;
/* Request-complete callback for MGMT_OP_SET_DISCOVERABLE: sync the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, answer the
 * pending command and refresh page scan and class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set speculatively in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_DISCOVERABLE (0x00 off, 0x01 general, 0x02
 * limited, with an optional timeout). Validates the mode/timeout
 * combination, takes the flag-only shortcut for powered-off adapters
 * or timeout-only changes, and otherwise queues the IAC/scan-enable
 * (BR/EDR) and advertising-data (LE) updates. The timeout itself is
 * armed in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Queue page-scan activity/type updates implementing fast-connectable
 * mode: interlaced scanning with a 160 ms interval when enabled, the
 * standard 1.28 s defaults otherwise. Commands are only added when
 * the values actually change. Requires BR/EDR and HCI >= 1.2.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
/* Request-complete callback for MGMT_OP_SET_CONNECTABLE: sync the
 * HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) flags,
 * answer the pending command and kick the page/background scan and
 * advertising-data updates that depend on the new state.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable. */
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Flag-only path for SET_CONNECTABLE, used when no HCI traffic is
 * needed (adapter powered off, or the request produced no commands).
 * Clearing connectable also clears discoverable. Sends the settings
 * response and, on change, refreshes scans and broadcasts New
 * Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
/* Handle MGMT_OP_SET_CONNECTABLE: toggle whether the adapter accepts
 * connections. Powered-off adapters take the flag-only shortcut;
 * otherwise the appropriate scan-enable (BR/EDR) or advertising (LE)
 * commands are queued and completion is handled asynchronously in
 * set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}

		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing queued, fall back to flag-only path */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2136 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2137 u16 len)
2139 struct mgmt_mode *cp = data;
2140 bool changed;
2141 int err;
2143 BT_DBG("request for %s", hdev->name);
2145 if (cp->val != 0x00 && cp->val != 0x01)
2146 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2147 MGMT_STATUS_INVALID_PARAMS);
2149 hci_dev_lock(hdev);
2151 if (cp->val)
2152 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2153 else
2154 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2156 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2157 if (err < 0)
2158 goto unlock;
2160 if (changed)
2161 err = new_settings(hdev, sk);
2163 unlock:
2164 hci_dev_unlock(hdev);
2165 return err;
/* Handle MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR authentication.
 * Powered-off adapters only flip the flag; otherwise a Write Auth
 * Enable command is sent and completion is handled asynchronously via
 * the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state - no command. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_SSP: toggle Secure Simple Pairing. Disabling SSP
 * also disables High Speed. Powered-off adapters only flip flags;
 * otherwise a Write SSP Mode command is sent (preceded by a debug-mode
 * disable when debug keys were in use).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* HS cannot stay on without SSP; report a change if
			 * either flag flipped.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* When turning SSP off while debug keys were in use, also turn
	 * SSP debug mode off (cp->val is 0x00 here).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_HS: toggle the High Speed setting. Pure flag
 * operation (no HCI commands), but requires SSP to be enabled and
 * refuses to disable HS while the adapter is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Request-complete callback for SET_LE: report the result to all
 * pending SET_LE commands and, when LE stayed enabled, refresh the
 * advertising data, scan response and background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference settings_rsp() took on the first socket. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_LE: toggle Low Energy support. LE-only
 * controllers cannot switch LE off. Powered-off adapters (or no-op
 * requests) only flip flags; otherwise a Write LE Host Supported
 * command is queued, preceded by an advertising disable when turning
 * LE off while advertising.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also turns the advertising setting off. */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
        struct mgmt_pending_cmd *cmd;

        /* Caller holds hdev lock, protecting the mgmt_pending list */
        list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
                switch (cmd->opcode) {
                case MGMT_OP_ADD_UUID:
                case MGMT_OP_REMOVE_UUID:
                case MGMT_OP_SET_DEV_CLASS:
                case MGMT_OP_SET_POWERED:
                        return true;
                }
        }

        return false;
}
2539 static const u8 bluetooth_base_uuid[] = {
2540 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2541 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2544 static u8 get_uuid_size(const u8 *uuid)
2546 u32 val;
2548 if (memcmp(uuid, bluetooth_base_uuid, 12))
2549 return 128;
2551 val = get_unaligned_le32(&uuid[12]);
2552 if (val > 0xffff)
2553 return 32;
2555 return 16;
/* Shared completion helper for the class/EIR related mgmt commands
 * (Add UUID, Remove UUID, Set Device Class). Completes the matching
 * pending command with the current device class as response payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
        struct mgmt_pending_cmd *cmd;

        hci_dev_lock(hdev);

        cmd = pending_find(mgmt_op, hdev);
        if (!cmd)
                goto unlock;

        /* The 3-byte Class of Device is always returned, even on error */
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
                          mgmt_status(status), hdev->dev_class, 3);

        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
/* HCI request completion hook for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("status 0x%02x", status);

        mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and update the
 * Class of Device and EIR data accordingly.
 *
 * If no HCI traffic is needed (hci_req_run() returns -ENODATA) the
 * command is completed immediately; otherwise a pending command is
 * created and completed from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
        struct mgmt_cp_add_uuid *cp = data;
        struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        struct bt_uuid *uuid;
        int err;

        BT_DBG("request for %s", hdev->name);

        hci_dev_lock(hdev);

        /* Only one CoD/EIR affecting command may be pending at a time */
        if (pending_eir_or_class(hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
                                      MGMT_STATUS_BUSY);
                goto failed;
        }

        uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
        if (!uuid) {
                err = -ENOMEM;
                goto failed;
        }

        memcpy(uuid->uuid, cp->uuid, 16);
        uuid->svc_hint = cp->svc_hint;
        uuid->size = get_uuid_size(cp->uuid);

        list_add_tail(&uuid->list, &hdev->uuids);

        hci_req_init(&req, hdev);

        update_class(&req);
        update_eir(&req);

        err = hci_req_run(&req, add_uuid_complete);
        if (err < 0) {
                if (err != -ENODATA)
                        goto failed;

                /* -ENODATA: nothing to send to the controller, so the
                 * command can be completed right away.
                 */
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
                                        hdev->dev_class, 3);
                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        err = 0;

failed:
        hci_dev_unlock(hdev);
        return err;
}
/* Arm the service cache: defer CoD/EIR updates by scheduling the
 * service_cache work. Returns true if the cache was (re)armed, false if
 * the device is unpowered or caching was already active.
 */
static bool enable_service_cache(struct hci_dev *hdev)
{
        if (!hdev_is_powered(hdev))
                return false;

        if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
                queue_delayed_work(hdev->workqueue, &hdev->service_cache,
                                   CACHE_TIMEOUT);
                return true;
        }

        return false;
}
/* HCI request completion hook for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("status 0x%02x", status);

        mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them when the all-zero wildcard UUID is given) and refresh CoD/EIR.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
{
        struct mgmt_cp_remove_uuid *cp = data;
        struct mgmt_pending_cmd *cmd;
        struct bt_uuid *match, *tmp;
        /* All-zero UUID acts as a wildcard meaning "remove everything" */
        u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        struct hci_request req;
        int err, found;

        BT_DBG("request for %s", hdev->name);

        hci_dev_lock(hdev);

        /* Only one CoD/EIR affecting command may be pending at a time */
        if (pending_eir_or_class(hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
                                      MGMT_STATUS_BUSY);
                goto unlock;
        }

        if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
                hci_uuids_clear(hdev);

                /* If the service cache could be armed, the actual HCI
                 * update is deferred and the command completes now.
                 */
                if (enable_service_cache(hdev)) {
                        err = mgmt_cmd_complete(sk, hdev->id,
                                                MGMT_OP_REMOVE_UUID,
                                                0, hdev->dev_class, 3);
                        goto unlock;
                }

                goto update_class;
        }

        found = 0;

        /* _safe variant required since matching entries are deleted */
        list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
                if (memcmp(match->uuid, cp->uuid, 16) != 0)
                        continue;

                list_del(&match->list);
                kfree(match);
                found++;
        }

        if (found == 0) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
                                      MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }

update_class:
        hci_req_init(&req, hdev);

        update_class(&req);
        update_eir(&req);

        err = hci_req_run(&req, remove_uuid_complete);
        if (err < 0) {
                if (err != -ENODATA)
                        goto unlock;

                /* -ENODATA: no HCI traffic needed, complete immediately */
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
                                        hdev->dev_class, 3);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        err = 0;

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* HCI request completion hook for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("status 0x%02x", status);

        mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor device class
 * and push the new Class of Device (and EIR, if the service cache was
 * active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 len)
{
        struct mgmt_cp_set_dev_class *cp = data;
        struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;

        BT_DBG("request for %s", hdev->name);

        if (!lmp_bredr_capable(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
                                       MGMT_STATUS_NOT_SUPPORTED);

        hci_dev_lock(hdev);

        /* Only one CoD/EIR affecting command may be pending at a time */
        if (pending_eir_or_class(hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
                                      MGMT_STATUS_BUSY);
                goto unlock;
        }

        /* Low two minor bits and high three major bits are reserved */
        if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
                                      MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }

        hdev->major_class = cp->major;
        hdev->minor_class = cp->minor;

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
                                        hdev->dev_class, 3);
                goto unlock;
        }

        hci_req_init(&req, hdev);

        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
                /* The lock must be dropped while synchronously cancelling
                 * the service_cache work, since that work itself takes
                 * the hdev lock.
                 */
                hci_dev_unlock(hdev);
                cancel_delayed_work_sync(&hdev->service_cache);
                hci_dev_lock(hdev);
                update_eir(&req);
        }

        update_class(&req);

        err = hci_req_run(&req, set_class_complete);
        if (err < 0) {
                if (err != -ENODATA)
                        goto unlock;

                /* -ENODATA: no HCI traffic needed, complete immediately */
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
                                        hdev->dev_class, 3);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        err = 0;

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link
 * keys with the ones supplied by userspace and update the debug-key
 * policy. All parameters are validated before any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct mgmt_cp_load_link_keys *cp = data;
        /* Upper bound that keeps expected_len below from overflowing u16 */
        const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
                                   sizeof(struct mgmt_link_key_info));
        u16 key_count, expected_len;
        bool changed;
        int i;

        BT_DBG("request for %s", hdev->name);

        if (!lmp_bredr_capable(hdev))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
                                       MGMT_STATUS_NOT_SUPPORTED);

        key_count = __le16_to_cpu(cp->key_count);
        if (key_count > max_key_count) {
                BT_ERR("load_link_keys: too big key_count value %u",
                       key_count);
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
                                       MGMT_STATUS_INVALID_PARAMS);
        }

        /* The payload length must exactly match the declared key count */
        expected_len = sizeof(*cp) + key_count *
                                        sizeof(struct mgmt_link_key_info);
        if (expected_len != len) {
                BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
                       expected_len, len);
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
                                       MGMT_STATUS_INVALID_PARAMS);
        }

        if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
                                       MGMT_STATUS_INVALID_PARAMS);

        BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
               key_count);

        /* Validate every key entry before clearing the existing keys */
        for (i = 0; i < key_count; i++) {
                struct mgmt_link_key_info *key = &cp->keys[i];

                if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
                        return mgmt_cmd_status(sk, hdev->id,
                                               MGMT_OP_LOAD_LINK_KEYS,
                                               MGMT_STATUS_INVALID_PARAMS);
        }

        hci_dev_lock(hdev);

        hci_link_keys_clear(hdev);

        if (cp->debug_keys)
                changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
        else
                changed = hci_dev_test_and_clear_flag(hdev,
                                                      HCI_KEEP_DEBUG_KEYS);

        if (changed)
                new_settings(hdev, NULL);

        for (i = 0; i < key_count; i++) {
                struct mgmt_link_key_info *key = &cp->keys[i];

                /* Always ignore debug keys and require a new pairing if
                 * the user wants to use them.
                 */
                if (key->type == HCI_LK_DEBUG_COMBINATION)
                        continue;

                hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
                                 key->type, key->pin_len, NULL);
        }

        mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

        hci_dev_unlock(hdev);

        return 0;
}
/* Emit the Device Unpaired event for the given address, skipping the
 * socket that initiated the unpairing (it gets a command response
 * instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
                           u8 addr_type, struct sock *skip_sk)
{
        struct mgmt_ev_device_unpaired ev;

        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = addr_type;

        return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
                          skip_sk);
}
/* Handler for MGMT_OP_UNPAIR_DEVICE: remove all stored keys for a
 * device and optionally disconnect it.
 *
 * Throughout this function the local "conn" variable doubles as the
 * "should the link be terminated" flag: it is left NULL whenever no
 * disconnection is wanted or the device is not connected.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 len)
{
        struct mgmt_cp_unpair_device *cp = data;
        struct mgmt_rp_unpair_device rp;
        struct hci_cp_disconnect dc;
        struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                         MGMT_STATUS_INVALID_PARAMS,
                                         &rp, sizeof(rp));

        if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
                return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                         MGMT_STATUS_INVALID_PARAMS,
                                         &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_POWERED, &rp,
                                        sizeof(rp));
                goto unlock;
        }

        if (cp->addr.type == BDADDR_BREDR) {
                /* If disconnection is requested, then look up the
                 * connection. If the remote device is connected, it
                 * will be later used to terminate the link.
                 *
                 * Setting it to NULL explicitly will cause no
                 * termination of the link.
                 */
                if (cp->disconnect)
                        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                                       &cp->addr.bdaddr);
                else
                        conn = NULL;

                err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
        } else {
                u8 addr_type;

                if (cp->addr.type == BDADDR_LE_PUBLIC)
                        addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        addr_type = ADDR_LE_DEV_RANDOM;

                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
                                               &cp->addr.bdaddr);
                if (conn) {
                        /* Defer clearing up the connection parameters
                         * until closing to give a chance of keeping
                         * them if a repairing happens.
                         */
                        set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

                        /* If disconnection is not requested, then
                         * clear the connection variable so that the
                         * link is not terminated.
                         */
                        if (!cp->disconnect)
                                conn = NULL;
                } else {
                        hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
                }

                hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

                err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
        }

        /* No key was found for this address: the device was not paired */
        if (err < 0) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_PAIRED, &rp,
                                        sizeof(rp));
                goto unlock;
        }

        /* If the connection variable is set, then termination of the
         * link is requested.
         */
        if (!conn) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
                                        &rp, sizeof(rp));
                device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
                               sizeof(*cp));
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        cmd->cmd_complete = addr_cmd_complete;

        dc.handle = cpu_to_le16(conn->handle);
        dc.reason = 0x13; /* Remote User Terminated Connection */
        err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
        if (err < 0)
                mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for MGMT_OP_DISCONNECT: terminate an existing BR/EDR or LE
 * connection. Completion of the pending command happens when the
 * disconnect event arrives.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
                      u16 len)
{
        struct mgmt_cp_disconnect *cp = data;
        struct mgmt_rp_disconnect rp;
        struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        BT_DBG("");

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                         MGMT_STATUS_INVALID_PARAMS,
                                         &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                        MGMT_STATUS_NOT_POWERED, &rp,
                                        sizeof(rp));
                goto failed;
        }

        /* Only one disconnect command may be pending at a time */
        if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                        MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto failed;
        }

        if (cp->addr.type == BDADDR_BREDR)
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                               &cp->addr.bdaddr);
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

        if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                        MGMT_STATUS_NOT_CONNECTED, &rp,
                                        sizeof(rp));
                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        cmd->cmd_complete = generic_cmd_complete;

        err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}
3096 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3098 switch (link_type) {
3099 case LE_LINK:
3100 switch (addr_type) {
3101 case ADDR_LE_DEV_PUBLIC:
3102 return BDADDR_LE_PUBLIC;
3104 default:
3105 /* Fallback to LE Random address type */
3106 return BDADDR_LE_RANDOM;
3109 default:
3110 /* Fallback to BR/EDR type */
3111 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 data_len)
{
        struct mgmt_rp_get_connections *rp;
        struct hci_conn *c;
        size_t rp_len;
        int err;
        u16 i;

        BT_DBG("");

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
                                      MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }

        /* First pass: count connections to size the response buffer */
        i = 0;
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
                        i++;
        }

        rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
        rp = kmalloc(rp_len, GFP_KERNEL);
        if (!rp) {
                err = -ENOMEM;
                goto unlock;
        }

        /* Second pass: fill in the addresses. SCO/eSCO entries are
         * filtered out by not advancing i, so their slot is reused.
         */
        i = 0;
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
                        continue;
                bacpy(&rp->addr[i].bdaddr, &c->dst);
                rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
                if (c->type == SCO_LINK || c->type == ESCO_LINK)
                        continue;
                i++;
        }

        rp->conn_count = cpu_to_le16(i);

        /* Recalculate length in case of filtered SCO connections, etc */
        rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
                                rp_len);

        kfree(rp);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Queue an HCI PIN Code Negative Reply and track it as a pending
 * PIN_CODE_NEG_REPLY mgmt command. Caller holds the hdev lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                   struct mgmt_cp_pin_code_neg_reply *cp)
{
        struct mgmt_pending_cmd *cmd;
        int err;

        cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
                               sizeof(*cp));
        if (!cmd)
                return -ENOMEM;

        /* Only the bdaddr part of the mgmt address is sent on the wire */
        err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
                           sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
        if (err < 0)
                mgmt_pending_remove(cmd);

        return err;
}
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN to
 * the controller. A too-short PIN for a high-security connection is
 * converted into a negative reply plus an INVALID_PARAMS status.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct hci_conn *conn;
        struct mgmt_cp_pin_code_reply *cp = data;
        struct hci_cp_pin_code_reply reply;
        struct mgmt_pending_cmd *cmd;
        int err;

        BT_DBG("");

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
                                      MGMT_STATUS_NOT_POWERED);
                goto failed;
        }

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
        if (!conn) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
                                      MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }

        /* High security requires a full 16-byte PIN; reject anything
         * shorter by sending a negative reply to the controller.
         */
        if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
                struct mgmt_cp_pin_code_neg_reply ncp;

                memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

                BT_ERR("PIN code is not 16 bytes long");

                err = send_pin_code_neg_reply(sk, hdev, &ncp);
                if (err >= 0)
                        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
                                              MGMT_STATUS_INVALID_PARAMS);

                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        cmd->cmd_complete = addr_cmd_complete;

        bacpy(&reply.bdaddr, &cp->addr.bdaddr);
        reply.pin_len = cp->pin_len;
        memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

        err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for MGMT_OP_SET_IO_CAPABILITY: store the IO capability that
 * will be advertised during future pairing attempts.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
                             u16 len)
{
        struct mgmt_cp_set_io_capability *cp = data;

        BT_DBG("");

        /* KeyboardDisplay is the highest defined IO capability value */
        if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
                return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
                                         MGMT_STATUS_INVALID_PARAMS, NULL, 0);

        hci_dev_lock(hdev);

        hdev->io_capability = cp->io_capability;

        BT_DBG("%s IO capability set to 0x%02x", hdev->name,
               hdev->io_capability);

        hci_dev_unlock(hdev);

        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
                                 NULL, 0);
}
/* Find the pending PAIR_DEVICE command associated with this connection
 * (matched via cmd->user_data). Returns NULL if none exists.
 */
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct mgmt_pending_cmd *cmd;

        list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
                if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
                        continue;

                if (cmd->user_data != conn)
                        continue;

                return cmd;
        }

        return NULL;
}
/* cmd_complete handler for PAIR_DEVICE: send the final response,
 * detach the pairing callbacks and release the connection references
 * taken in pair_device().
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
        struct mgmt_rp_pair_device rp;
        struct hci_conn *conn = cmd->user_data;
        int err;

        bacpy(&rp.addr.bdaddr, &conn->dst);
        rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

        err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
                                status, &rp, sizeof(rp));

        /* So we don't get further callbacks for this connection */
        conn->connect_cfm_cb = NULL;
        conn->security_cfm_cb = NULL;
        conn->disconn_cfm_cb = NULL;

        /* Drop the usage reference taken when the pairing was started */
        hci_conn_drop(conn);

        /* The device is paired so there is no need to remove
         * its connection parameters anymore.
         */
        clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

        /* Matches the hci_conn_get() stored in cmd->user_data */
        hci_conn_put(conn);

        return err;
}
/* Called by the SMP layer when pairing over LE finishes; completes any
 * matching pending PAIR_DEVICE command with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
        u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
        struct mgmt_pending_cmd *cmd;

        cmd = find_pairing(conn);
        if (cmd) {
                cmd->cmd_complete(cmd, status);
                mgmt_pending_remove(cmd);
        }
}
/* Connection callback used for BR/EDR pairing: completes the pending
 * PAIR_DEVICE command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status %u", status);

        cmd = find_pairing(conn);
        if (!cmd) {
                BT_DBG("Unable to find a pending command");
                return;
        }

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);
}
/* Connection callback used for LE pairing. Only errors are handled
 * here; successful completion is reported via mgmt_smp_complete(),
 * since an established LE link alone does not prove pairing succeeded.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status %u", status);

        if (!status)
                return;

        cmd = find_pairing(conn);
        if (!cmd) {
                BT_DBG("Unable to find a pending command");
                return;
        }

        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);
}
/* Handler for MGMT_OP_PAIR_DEVICE: initiate pairing with a remote
 * BR/EDR or LE device, creating the connection if necessary.
 *
 * On success a pending command holding a reference to the connection
 * (cmd->user_data) is left in place and completed later from the
 * pairing callbacks.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
{
        struct mgmt_cp_pair_device *cp = data;
        struct mgmt_rp_pair_device rp;
        struct mgmt_pending_cmd *cmd;
        u8 sec_level, auth_type;
        struct hci_conn *conn;
        int err;

        BT_DBG("");

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                         MGMT_STATUS_INVALID_PARAMS,
                                         &rp, sizeof(rp));

        if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
                return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                         MGMT_STATUS_INVALID_PARAMS,
                                         &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                        MGMT_STATUS_NOT_POWERED, &rp,
                                        sizeof(rp));
                goto unlock;
        }

        if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                        MGMT_STATUS_ALREADY_PAIRED, &rp,
                                        sizeof(rp));
                goto unlock;
        }

        sec_level = BT_SECURITY_MEDIUM;
        auth_type = HCI_AT_DEDICATED_BONDING;

        if (cp->addr.type == BDADDR_BREDR) {
                conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
                                       auth_type);
        } else {
                u8 addr_type;

                /* Convert from L2CAP channel address type to HCI address type
                 */
                if (cp->addr.type == BDADDR_LE_PUBLIC)
                        addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        addr_type = ADDR_LE_DEV_RANDOM;

                /* When pairing a new device, it is expected to remember
                 * this device for future connections. Adding the connection
                 * parameter information ahead of time allows tracking
                 * of the slave preferred values and will speed up any
                 * further connection establishment.
                 *
                 * If connection parameters already exist, then they
                 * will be kept and this function does nothing.
                 */
                hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

                conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
                                      sec_level, HCI_LE_CONN_TIMEOUT,
                                      HCI_ROLE_MASTER);
        }

        if (IS_ERR(conn)) {
                int status;

                /* Map the connect error onto a mgmt status code */
                if (PTR_ERR(conn) == -EBUSY)
                        status = MGMT_STATUS_BUSY;
                else if (PTR_ERR(conn) == -EOPNOTSUPP)
                        status = MGMT_STATUS_NOT_SUPPORTED;
                else if (PTR_ERR(conn) == -ECONNREFUSED)
                        status = MGMT_STATUS_REJECTED;
                else
                        status = MGMT_STATUS_CONNECT_FAILED;

                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                        status, &rp, sizeof(rp));
                goto unlock;
        }

        /* Non-NULL callbacks mean another pairing is already in progress */
        if (conn->connect_cfm_cb) {
                hci_conn_drop(conn);
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                        MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                hci_conn_drop(conn);
                goto unlock;
        }

        cmd->cmd_complete = pairing_complete;

        /* For LE, just connecting isn't a proof that the pairing finished */
        if (cp->addr.type == BDADDR_BREDR) {
                conn->connect_cfm_cb = pairing_complete_cb;
                conn->security_cfm_cb = pairing_complete_cb;
                conn->disconn_cfm_cb = pairing_complete_cb;
        } else {
                conn->connect_cfm_cb = le_pairing_complete_cb;
                conn->security_cfm_cb = le_pairing_complete_cb;
                conn->disconn_cfm_cb = le_pairing_complete_cb;
        }

        conn->io_capability = cp->io_cap;
        /* Reference released in pairing_complete() */
        cmd->user_data = hci_conn_get(conn);

        /* Already connected and secure enough: complete right away */
        if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
            hci_conn_security(conn, sec_level, auth_type, true)) {
                cmd->cmd_complete(cmd, 0);
                mgmt_pending_remove(cmd);
        }

        err = 0;

unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress pairing
 * with the given address. The pending PAIR_DEVICE command is completed
 * with MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
{
        struct mgmt_addr_info *addr = data;
        struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        BT_DBG("");

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
                                      MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }

        cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
        if (!cmd) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
                                      MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }

        conn = cmd->user_data;

        /* The address must match the pairing actually in progress */
        if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
                err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
                                      MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }

        cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
        mgmt_pending_remove(cmd);

        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
                                addr, sizeof(*addr));
unlock:
        hci_dev_unlock(hdev);
        return err;
}
/* Common implementation for the user confirmation/passkey (negative)
 * reply mgmt commands.
 *
 * For LE connections the reply is routed through SMP and answered
 * immediately; for BR/EDR the corresponding HCI command is sent and a
 * pending mgmt command is left to be completed by the HCI event.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
                             struct mgmt_addr_info *addr, u16 mgmt_op,
                             u16 hci_op, __le32 passkey)
{
        struct mgmt_pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
                                        MGMT_STATUS_NOT_POWERED, addr,
                                        sizeof(*addr));
                goto done;
        }

        if (addr->type == BDADDR_BREDR)
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

        if (!conn) {
                err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
                                        MGMT_STATUS_NOT_CONNECTED, addr,
                                        sizeof(*addr));
                goto done;
        }

        /* LE replies are handled synchronously by the SMP layer */
        if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
                err = smp_user_confirm_reply(conn, mgmt_op, passkey);
                if (!err)
                        err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
                                                MGMT_STATUS_SUCCESS, addr,
                                                sizeof(*addr));
                else
                        err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
                                                MGMT_STATUS_FAILED, addr,
                                                sizeof(*addr));

                goto done;
        }

        cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
        if (!cmd) {
                err = -ENOMEM;
                goto done;
        }

        cmd->cmd_complete = addr_cmd_complete;

        /* Continue with pairing via HCI */
        if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
                struct hci_cp_user_passkey_reply cp;

                bacpy(&cp.bdaddr, &addr->bdaddr);
                cp.passkey = passkey;
                err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
        } else
                err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
                                   &addr->bdaddr);

        if (err < 0)
                mgmt_pending_remove(cmd);

done:
        hci_dev_unlock(hdev);
        return err;
}
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the PIN-code negative reply opcodes.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
                              void *data, u16 len)
{
        struct mgmt_cp_pin_code_neg_reply *cp = data;

        BT_DBG("");

        return user_pairing_resp(sk, hdev, &cp->addr,
                                 MGMT_OP_PIN_CODE_NEG_REPLY,
                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
/* Handler for MGMT_OP_USER_CONFIRM_REPLY. The exact-length check is
 * needed here because this command takes no variable-length payload.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
{
        struct mgmt_cp_user_confirm_reply *cp = data;

        BT_DBG("");

        if (len != sizeof(*cp))
                return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
                                       MGMT_STATUS_INVALID_PARAMS);

        return user_pairing_resp(sk, hdev, &cp->addr,
                                 MGMT_OP_USER_CONFIRM_REPLY,
                                 HCI_OP_USER_CONFIRM_REPLY, 0);
}
/* Handler for MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the confirm negative reply opcodes.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                  void *data, u16 len)
{
        struct mgmt_cp_user_confirm_neg_reply *cp = data;

        BT_DBG("");

        return user_pairing_resp(sk, hdev, &cp->addr,
                                 MGMT_OP_USER_CONFIRM_NEG_REPLY,
                                 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
/* Handler for MGMT_OP_USER_PASSKEY_REPLY: thin wrapper around
 * user_pairing_resp() forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
{
        struct mgmt_cp_user_passkey_reply *cp = data;

        BT_DBG("");

        return user_pairing_resp(sk, hdev, &cp->addr,
                                 MGMT_OP_USER_PASSKEY_REPLY,
                                 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
/* Handler for MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the passkey negative reply opcodes.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                  void *data, u16 len)
{
        struct mgmt_cp_user_passkey_neg_reply *cp = data;

        BT_DBG("");

        return user_pairing_resp(sk, hdev, &cp->addr,
                                 MGMT_OP_USER_PASSKEY_NEG_REPLY,
                                 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
/* Queue an HCI Write Local Name command carrying the current device
 * name into the given request.
 */
static void update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
/* HCI request completion hook for set_local_name(): answer the pending
 * SET_LOCAL_NAME command with either the error status or the name
 * parameters that were requested.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        struct mgmt_cp_set_local_name *cp;
        struct mgmt_pending_cmd *cmd;

        BT_DBG("status 0x%02x", status);

        hci_dev_lock(hdev);

        cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
        if (!cmd)
                goto unlock;

        /* The original command parameters are echoed back on success */
        cp = cmd->param;

        if (status)
                mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
                                mgmt_status(status));
        else
                mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
                                  cp, sizeof(*cp));

        mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
}
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the local device name and
 * short name, propagating the change into local name, EIR and scan
 * response data as appropriate.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct mgmt_cp_set_local_name *cp = data;
        struct mgmt_pending_cmd *cmd;
        struct hci_request req;
        int err;

        BT_DBG("");

        hci_dev_lock(hdev);

        /* If the old values are the same as the new ones just return a
         * direct command complete event.
         */
        if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
            !memcmp(hdev->short_name, cp->short_name,
                    sizeof(hdev->short_name))) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
                                        data, len);
                goto failed;
        }

        memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

        /* Unpowered: only the host-side copy of the name changes; the
         * change is still broadcast to other mgmt sockets.
         */
        if (!hdev_is_powered(hdev)) {
                memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
                                        data, len);
                if (err < 0)
                        goto failed;

                err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
                                         data, len, sk);

                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

        hci_req_init(&req, hdev);

        if (lmp_bredr_capable(hdev)) {
                update_name(&req);
                update_eir(&req);
        }

        /* The name is stored in the scan response data and so
         * no need to update the advertising data here.
         */
        if (lmp_le_capable(hdev))
                update_scan_rsp_data(&req);

        err = hci_req_run(&req, set_name_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}
/* Completion callback for Read Local OOB Data: parse the controller reply
 * (legacy P-192-only or extended P-192+P-256, selected by opcode) and
 * resolve the pending MGMT_OP_READ_LOCAL_OOB_DATA command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller event. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy reply has no P-256 values; trim them from the
		 * response so user space sees only the 192-bit fields.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: issue the appropriate HCI Read Local
 * OOB (Extended) Data command; the reply is parsed in
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request may be in flight. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Use the extended variant when Secure Connections is enabled so
	 * the P-256 values are included as well.
	 */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA. The command comes in two sizes:
 * the legacy form with only P-192 hash/randomizer, and the extended form
 * that also carries P-256 values. Zeroed key material disables the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is defined for BR/EDR addresses only. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4007 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4008 void *data, u16 len)
4010 struct mgmt_cp_remove_remote_oob_data *cp = data;
4011 u8 status;
4012 int err;
4014 BT_DBG("%s", hdev->name);
4016 if (cp->addr.type != BDADDR_BREDR)
4017 return mgmt_cmd_complete(sk, hdev->id,
4018 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4019 MGMT_STATUS_INVALID_PARAMS,
4020 &cp->addr, sizeof(cp->addr));
4022 hci_dev_lock(hdev);
4024 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4025 hci_remote_oob_data_clear(hdev);
4026 status = MGMT_STATUS_SUCCESS;
4027 goto done;
4030 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4031 if (err < 0)
4032 status = MGMT_STATUS_INVALID_PARAMS;
4033 else
4034 status = MGMT_STATUS_SUCCESS;
4036 done:
4037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4038 status, &cp->addr, sizeof(cp->addr));
4040 hci_dev_unlock(hdev);
4041 return err;
/* Append an HCI Inquiry command to @req for BR/EDR discovery.
 * Returns true on success; on failure returns false and writes the
 * mgmt error code to @status.
 */
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev);
	if (*status)
		return false;

	/* An inquiry is already running; do not start a second one. */
	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY;
		return false;
	}

	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return true;
}
/* Append the HCI commands that start an active LE scan with the given
 * @interval to @req. Returns true on success; on failure returns false
 * and writes the mgmt error code to @status.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}
/* Queue the HCI commands matching hdev->discovery.type onto @req.
 * Returns true on success; on failure returns false and writes the
 * mgmt error code to @status.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		/* Interleaved discovery without the quirk still needs a
		 * usable BR/EDR side; otherwise only the LE scan below runs.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
/* Completion callback shared by start_discovery() and
 * start_service_discovery(): resolve the pending command, update the
 * discovery state and, for LE scans, schedule the scan-disable work.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two discovery commands may be pending. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_START_DISCOVERY: validate state, queue the HCI commands
 * for the requested discovery type and move to DISCOVERY_STARTING.
 * start_discovery_complete() finishes the operation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while a discovery is running or a periodic inquiry is on. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* cmd_complete hook for Start Service Discovery: echo back only the first
 * parameter byte (the discovery type), not the whole UUID list.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery() but with
 * an RSSI threshold and an optional UUID filter list appended to the
 * command. Validates the variable-length payload before copying it.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload must carry exactly uuid_count 128-bit UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Completion callback for stop_discovery(): resolve the pending command
 * and, on success, mark discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_STOP_DISCOVERY: abort the ongoing discovery whose type
 * matches the request. If stopping required no HCI traffic the command
 * completes immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery actually running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_CONFIRM_NAME: user space reports whether the name of a
 * discovered device is already known, which decides if a remote name
 * request is still needed for the inquiry-cache entry.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolve needed for this entry. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4556 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4557 u16 len)
4559 struct mgmt_cp_block_device *cp = data;
4560 u8 status;
4561 int err;
4563 BT_DBG("%s", hdev->name);
4565 if (!bdaddr_type_is_valid(cp->addr.type))
4566 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4567 MGMT_STATUS_INVALID_PARAMS,
4568 &cp->addr, sizeof(cp->addr));
4570 hci_dev_lock(hdev);
4572 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4573 cp->addr.type);
4574 if (err < 0) {
4575 status = MGMT_STATUS_FAILED;
4576 goto done;
4579 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4580 sk);
4581 status = MGMT_STATUS_SUCCESS;
4583 done:
4584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4585 &cp->addr, sizeof(cp->addr));
4587 hci_dev_unlock(hdev);
4589 return err;
4592 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4593 u16 len)
4595 struct mgmt_cp_unblock_device *cp = data;
4596 u8 status;
4597 int err;
4599 BT_DBG("%s", hdev->name);
4601 if (!bdaddr_type_is_valid(cp->addr.type))
4602 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4603 MGMT_STATUS_INVALID_PARAMS,
4604 &cp->addr, sizeof(cp->addr));
4606 hci_dev_lock(hdev);
4608 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4609 cp->addr.type);
4610 if (err < 0) {
4611 status = MGMT_STATUS_INVALID_PARAMS;
4612 goto done;
4615 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4616 sk);
4617 status = MGMT_STATUS_SUCCESS;
4619 done:
4620 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4621 &cp->addr, sizeof(cp->addr));
4623 hci_dev_unlock(hdev);
4625 return err;
4628 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4629 u16 len)
4631 struct mgmt_cp_set_device_id *cp = data;
4632 struct hci_request req;
4633 int err;
4634 __u16 source;
4636 BT_DBG("%s", hdev->name);
4638 source = __le16_to_cpu(cp->source);
4640 if (source > 0x0002)
4641 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4642 MGMT_STATUS_INVALID_PARAMS);
4644 hci_dev_lock(hdev);
4646 hdev->devid_source = source;
4647 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4648 hdev->devid_product = __le16_to_cpu(cp->product);
4649 hdev->devid_version = __le16_to_cpu(cp->version);
4651 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4652 NULL, 0);
4654 hci_req_init(&req, hdev);
4655 update_eir(&req);
4656 hci_req_run(&req, NULL);
4658 hci_dev_unlock(hdev);
4660 return err;
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete(); nothing to do beyond logging the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
/* Completion callback for set_advertising(): sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending Set Advertising
 * commands and re-enable instance advertising if it was configured.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		goto unlock;

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	enable_advertising(&req);

	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_ADVERTISING. Value 0x00 disables, 0x01 enables and
 * 0x02 enables connectable advertising. When no HCI traffic is possible
 * or needed, only the flags are toggled and a direct response is sent.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A conflicting Set Advertising or Set LE is already in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting. */
		update_adv_data_for_instance(&req, 0);
		update_scan_rsp_data_for_instance(&req, 0);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_STATIC_ADDRESS: store an LE static random address.
 * Only allowed while powered off; the address must have its two most
 * significant bits set, as the random-address format requires.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; anything else is
	 * validated against the static random address format.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_SCAN_PARAMS: store the LE scan interval and window
 * (both range-checked, window <= interval) and restart background
 * scanning so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
/* Completion callback for set_fast_connectable(): update the
 * HCI_FAST_CONNECTABLE flag on success and resolve the pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page scan
 * parameters. Needs BR/EDR enabled and a controller of at least
 * Bluetooth 1.2; while powered off only the flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just echo the current settings back. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5013 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5015 struct mgmt_pending_cmd *cmd;
5017 BT_DBG("status 0x%02x", status);
5019 hci_dev_lock(hdev);
5021 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5022 if (!cmd)
5023 goto unlock;
5025 if (status) {
5026 u8 mgmt_err = mgmt_status(status);
5028 /* We need to restore the flag if related HCI commands
5029 * failed.
5031 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5033 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5034 } else {
5035 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5036 new_settings(hdev, cmd->sk);
5039 mgmt_pending_remove(cmd);
5041 unlock:
5042 hci_dev_unlock(hdev);
5045 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5047 struct mgmt_mode *cp = data;
5048 struct mgmt_pending_cmd *cmd;
5049 struct hci_request req;
5050 int err;
5052 BT_DBG("request for %s", hdev->name);
5054 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5055 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5056 MGMT_STATUS_NOT_SUPPORTED);
5058 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5059 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5060 MGMT_STATUS_REJECTED);
5062 if (cp->val != 0x00 && cp->val != 0x01)
5063 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5064 MGMT_STATUS_INVALID_PARAMS);
5066 hci_dev_lock(hdev);
5068 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5069 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5070 goto unlock;
5073 if (!hdev_is_powered(hdev)) {
5074 if (!cp->val) {
5075 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5076 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5077 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5078 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5079 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5082 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5084 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5085 if (err < 0)
5086 goto unlock;
5088 err = new_settings(hdev, sk);
5089 goto unlock;
5092 /* Reject disabling when powered on */
5093 if (!cp->val) {
5094 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5095 MGMT_STATUS_REJECTED);
5096 goto unlock;
5097 } else {
5098 /* When configuring a dual-mode controller to operate
5099 * with LE only and using a static address, then switching
5100 * BR/EDR back on is not allowed.
5102 * Dual-mode controllers shall operate with the public
5103 * address as its identity address for BR/EDR and LE. So
5104 * reject the attempt to create an invalid configuration.
5106 * The same restrictions applies when secure connections
5107 * has been enabled. For BR/EDR this is a controller feature
5108 * while for LE it is a host stack feature. This means that
5109 * switching BR/EDR back on when secure connections has been
5110 * enabled is not a supported transaction.
5112 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5113 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5114 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5115 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5116 MGMT_STATUS_REJECTED);
5117 goto unlock;
5121 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5122 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5123 MGMT_STATUS_BUSY);
5124 goto unlock;
5127 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5128 if (!cmd) {
5129 err = -ENOMEM;
5130 goto unlock;
5133 /* We need to flip the bit already here so that update_adv_data
5134 * generates the correct flags.
5136 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5138 hci_req_init(&req, hdev);
5140 write_fast_connectable(&req, false);
5141 __hci_update_page_scan(&req);
5143 /* Since only the advertising data flags will change, there
5144 * is no need to update the scan response data.
5146 update_adv_data(&req);
5148 err = hci_req_run(&req, set_bredr_complete);
5149 if (err < 0)
5150 mgmt_pending_remove(cmd);
5152 unlock:
5153 hci_dev_unlock(hdev);
5154 return err;
5157 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5159 struct mgmt_pending_cmd *cmd;
5160 struct mgmt_mode *cp;
5162 BT_DBG("%s status %u", hdev->name, status);
5164 hci_dev_lock(hdev);
5166 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5167 if (!cmd)
5168 goto unlock;
5170 if (status) {
5171 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5172 mgmt_status(status));
5173 goto remove;
5176 cp = cmd->param;
5178 switch (cp->val) {
5179 case 0x00:
5180 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5181 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5182 break;
5183 case 0x01:
5184 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5185 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5186 break;
5187 case 0x02:
5188 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5189 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5190 break;
5193 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5194 new_settings(hdev, cmd->sk);
5196 remove:
5197 mgmt_pending_remove(cmd);
5198 unlock:
5199 hci_dev_unlock(hdev);
5202 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5203 void *data, u16 len)
5205 struct mgmt_mode *cp = data;
5206 struct mgmt_pending_cmd *cmd;
5207 struct hci_request req;
5208 u8 val;
5209 int err;
5211 BT_DBG("request for %s", hdev->name);
5213 if (!lmp_sc_capable(hdev) &&
5214 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5216 MGMT_STATUS_NOT_SUPPORTED);
5218 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5219 lmp_sc_capable(hdev) &&
5220 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5222 MGMT_STATUS_REJECTED);
5224 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5225 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5226 MGMT_STATUS_INVALID_PARAMS);
5228 hci_dev_lock(hdev);
5230 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5231 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5232 bool changed;
5234 if (cp->val) {
5235 changed = !hci_dev_test_and_set_flag(hdev,
5236 HCI_SC_ENABLED);
5237 if (cp->val == 0x02)
5238 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5239 else
5240 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5241 } else {
5242 changed = hci_dev_test_and_clear_flag(hdev,
5243 HCI_SC_ENABLED);
5244 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5247 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5248 if (err < 0)
5249 goto failed;
5251 if (changed)
5252 err = new_settings(hdev, sk);
5254 goto failed;
5257 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5258 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5259 MGMT_STATUS_BUSY);
5260 goto failed;
5263 val = !!cp->val;
5265 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5266 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5267 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5268 goto failed;
5271 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5272 if (!cmd) {
5273 err = -ENOMEM;
5274 goto failed;
5277 hci_req_init(&req, hdev);
5278 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5279 err = hci_req_run(&req, sc_enable_complete);
5280 if (err < 0) {
5281 mgmt_pending_remove(cmd);
5282 goto failed;
5285 failed:
5286 hci_dev_unlock(hdev);
5287 return err;
5290 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5291 void *data, u16 len)
5293 struct mgmt_mode *cp = data;
5294 bool changed, use_changed;
5295 int err;
5297 BT_DBG("request for %s", hdev->name);
5299 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5300 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5301 MGMT_STATUS_INVALID_PARAMS);
5303 hci_dev_lock(hdev);
5305 if (cp->val)
5306 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5307 else
5308 changed = hci_dev_test_and_clear_flag(hdev,
5309 HCI_KEEP_DEBUG_KEYS);
5311 if (cp->val == 0x02)
5312 use_changed = !hci_dev_test_and_set_flag(hdev,
5313 HCI_USE_DEBUG_KEYS);
5314 else
5315 use_changed = hci_dev_test_and_clear_flag(hdev,
5316 HCI_USE_DEBUG_KEYS);
5318 if (hdev_is_powered(hdev) && use_changed &&
5319 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5320 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5321 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5322 sizeof(mode), &mode);
5325 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5326 if (err < 0)
5327 goto unlock;
5329 if (changed)
5330 err = new_settings(hdev, sk);
5332 unlock:
5333 hci_dev_unlock(hdev);
5334 return err;
5337 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5338 u16 len)
5340 struct mgmt_cp_set_privacy *cp = cp_data;
5341 bool changed;
5342 int err;
5344 BT_DBG("request for %s", hdev->name);
5346 if (!lmp_le_capable(hdev))
5347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5348 MGMT_STATUS_NOT_SUPPORTED);
5350 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5352 MGMT_STATUS_INVALID_PARAMS);
5354 if (hdev_is_powered(hdev))
5355 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5356 MGMT_STATUS_REJECTED);
5358 hci_dev_lock(hdev);
5360 /* If user space supports this command it is also expected to
5361 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5363 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5365 if (cp->privacy) {
5366 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5367 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5368 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5369 } else {
5370 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5371 memset(hdev->irk, 0, sizeof(hdev->irk));
5372 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5375 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5376 if (err < 0)
5377 goto unlock;
5379 if (changed)
5380 err = new_settings(hdev, sk);
5382 unlock:
5383 hci_dev_unlock(hdev);
5384 return err;
5387 static bool irk_is_valid(struct mgmt_irk_info *irk)
5389 switch (irk->addr.type) {
5390 case BDADDR_LE_PUBLIC:
5391 return true;
5393 case BDADDR_LE_RANDOM:
5394 /* Two most significant bits shall be set */
5395 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5396 return false;
5397 return true;
5400 return false;
5403 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5404 u16 len)
5406 struct mgmt_cp_load_irks *cp = cp_data;
5407 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5408 sizeof(struct mgmt_irk_info));
5409 u16 irk_count, expected_len;
5410 int i, err;
5412 BT_DBG("request for %s", hdev->name);
5414 if (!lmp_le_capable(hdev))
5415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5416 MGMT_STATUS_NOT_SUPPORTED);
5418 irk_count = __le16_to_cpu(cp->irk_count);
5419 if (irk_count > max_irk_count) {
5420 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5421 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5422 MGMT_STATUS_INVALID_PARAMS);
5425 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5426 if (expected_len != len) {
5427 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5428 expected_len, len);
5429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5430 MGMT_STATUS_INVALID_PARAMS);
5433 BT_DBG("%s irk_count %u", hdev->name, irk_count);
5435 for (i = 0; i < irk_count; i++) {
5436 struct mgmt_irk_info *key = &cp->irks[i];
5438 if (!irk_is_valid(key))
5439 return mgmt_cmd_status(sk, hdev->id,
5440 MGMT_OP_LOAD_IRKS,
5441 MGMT_STATUS_INVALID_PARAMS);
5444 hci_dev_lock(hdev);
5446 hci_smp_irks_clear(hdev);
5448 for (i = 0; i < irk_count; i++) {
5449 struct mgmt_irk_info *irk = &cp->irks[i];
5450 u8 addr_type;
5452 if (irk->addr.type == BDADDR_LE_PUBLIC)
5453 addr_type = ADDR_LE_DEV_PUBLIC;
5454 else
5455 addr_type = ADDR_LE_DEV_RANDOM;
5457 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5458 BDADDR_ANY);
5461 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5463 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5465 hci_dev_unlock(hdev);
5467 return err;
5470 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5472 if (key->master != 0x00 && key->master != 0x01)
5473 return false;
5475 switch (key->addr.type) {
5476 case BDADDR_LE_PUBLIC:
5477 return true;
5479 case BDADDR_LE_RANDOM:
5480 /* Two most significant bits shall be set */
5481 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5482 return false;
5483 return true;
5486 return false;
5489 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5490 void *cp_data, u16 len)
5492 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5493 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5494 sizeof(struct mgmt_ltk_info));
5495 u16 key_count, expected_len;
5496 int i, err;
5498 BT_DBG("request for %s", hdev->name);
5500 if (!lmp_le_capable(hdev))
5501 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5502 MGMT_STATUS_NOT_SUPPORTED);
5504 key_count = __le16_to_cpu(cp->key_count);
5505 if (key_count > max_key_count) {
5506 BT_ERR("load_ltks: too big key_count value %u", key_count);
5507 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5508 MGMT_STATUS_INVALID_PARAMS);
5511 expected_len = sizeof(*cp) + key_count *
5512 sizeof(struct mgmt_ltk_info);
5513 if (expected_len != len) {
5514 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5515 expected_len, len);
5516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5517 MGMT_STATUS_INVALID_PARAMS);
5520 BT_DBG("%s key_count %u", hdev->name, key_count);
5522 for (i = 0; i < key_count; i++) {
5523 struct mgmt_ltk_info *key = &cp->keys[i];
5525 if (!ltk_is_valid(key))
5526 return mgmt_cmd_status(sk, hdev->id,
5527 MGMT_OP_LOAD_LONG_TERM_KEYS,
5528 MGMT_STATUS_INVALID_PARAMS);
5531 hci_dev_lock(hdev);
5533 hci_smp_ltks_clear(hdev);
5535 for (i = 0; i < key_count; i++) {
5536 struct mgmt_ltk_info *key = &cp->keys[i];
5537 u8 type, addr_type, authenticated;
5539 if (key->addr.type == BDADDR_LE_PUBLIC)
5540 addr_type = ADDR_LE_DEV_PUBLIC;
5541 else
5542 addr_type = ADDR_LE_DEV_RANDOM;
5544 switch (key->type) {
5545 case MGMT_LTK_UNAUTHENTICATED:
5546 authenticated = 0x00;
5547 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5548 break;
5549 case MGMT_LTK_AUTHENTICATED:
5550 authenticated = 0x01;
5551 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5552 break;
5553 case MGMT_LTK_P256_UNAUTH:
5554 authenticated = 0x00;
5555 type = SMP_LTK_P256;
5556 break;
5557 case MGMT_LTK_P256_AUTH:
5558 authenticated = 0x01;
5559 type = SMP_LTK_P256;
5560 break;
5561 case MGMT_LTK_P256_DEBUG:
5562 authenticated = 0x00;
5563 type = SMP_LTK_P256_DEBUG;
5564 default:
5565 continue;
5568 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5569 authenticated, key->val, key->enc_size, key->ediv,
5570 key->rand);
5573 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5574 NULL, 0);
5576 hci_dev_unlock(hdev);
5578 return err;
5581 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5583 struct hci_conn *conn = cmd->user_data;
5584 struct mgmt_rp_get_conn_info rp;
5585 int err;
5587 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5589 if (status == MGMT_STATUS_SUCCESS) {
5590 rp.rssi = conn->rssi;
5591 rp.tx_power = conn->tx_power;
5592 rp.max_tx_power = conn->max_tx_power;
5593 } else {
5594 rp.rssi = HCI_RSSI_INVALID;
5595 rp.tx_power = HCI_TX_POWER_INVALID;
5596 rp.max_tx_power = HCI_TX_POWER_INVALID;
5599 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5600 status, &rp, sizeof(rp));
5602 hci_conn_drop(conn);
5603 hci_conn_put(conn);
5605 return err;
5608 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5609 u16 opcode)
5611 struct hci_cp_read_rssi *cp;
5612 struct mgmt_pending_cmd *cmd;
5613 struct hci_conn *conn;
5614 u16 handle;
5615 u8 status;
5617 BT_DBG("status 0x%02x", hci_status);
5619 hci_dev_lock(hdev);
5621 /* Commands sent in request are either Read RSSI or Read Transmit Power
5622 * Level so we check which one was last sent to retrieve connection
5623 * handle. Both commands have handle as first parameter so it's safe to
5624 * cast data on the same command struct.
5626 * First command sent is always Read RSSI and we fail only if it fails.
5627 * In other case we simply override error to indicate success as we
5628 * already remembered if TX power value is actually valid.
5630 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5631 if (!cp) {
5632 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5633 status = MGMT_STATUS_SUCCESS;
5634 } else {
5635 status = mgmt_status(hci_status);
5638 if (!cp) {
5639 BT_ERR("invalid sent_cmd in conn_info response");
5640 goto unlock;
5643 handle = __le16_to_cpu(cp->handle);
5644 conn = hci_conn_hash_lookup_handle(hdev, handle);
5645 if (!conn) {
5646 BT_ERR("unknown handle (%d) in conn_info response", handle);
5647 goto unlock;
5650 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5651 if (!cmd)
5652 goto unlock;
5654 cmd->cmd_complete(cmd, status);
5655 mgmt_pending_remove(cmd);
5657 unlock:
5658 hci_dev_unlock(hdev);
5661 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5662 u16 len)
5664 struct mgmt_cp_get_conn_info *cp = data;
5665 struct mgmt_rp_get_conn_info rp;
5666 struct hci_conn *conn;
5667 unsigned long conn_info_age;
5668 int err = 0;
5670 BT_DBG("%s", hdev->name);
5672 memset(&rp, 0, sizeof(rp));
5673 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5674 rp.addr.type = cp->addr.type;
5676 if (!bdaddr_type_is_valid(cp->addr.type))
5677 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5678 MGMT_STATUS_INVALID_PARAMS,
5679 &rp, sizeof(rp));
5681 hci_dev_lock(hdev);
5683 if (!hdev_is_powered(hdev)) {
5684 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5685 MGMT_STATUS_NOT_POWERED, &rp,
5686 sizeof(rp));
5687 goto unlock;
5690 if (cp->addr.type == BDADDR_BREDR)
5691 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5692 &cp->addr.bdaddr);
5693 else
5694 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5696 if (!conn || conn->state != BT_CONNECTED) {
5697 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5698 MGMT_STATUS_NOT_CONNECTED, &rp,
5699 sizeof(rp));
5700 goto unlock;
5703 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5704 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5705 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5706 goto unlock;
5709 /* To avoid client trying to guess when to poll again for information we
5710 * calculate conn info age as random value between min/max set in hdev.
5712 conn_info_age = hdev->conn_info_min_age +
5713 prandom_u32_max(hdev->conn_info_max_age -
5714 hdev->conn_info_min_age);
5716 /* Query controller to refresh cached values if they are too old or were
5717 * never read.
5719 if (time_after(jiffies, conn->conn_info_timestamp +
5720 msecs_to_jiffies(conn_info_age)) ||
5721 !conn->conn_info_timestamp) {
5722 struct hci_request req;
5723 struct hci_cp_read_tx_power req_txp_cp;
5724 struct hci_cp_read_rssi req_rssi_cp;
5725 struct mgmt_pending_cmd *cmd;
5727 hci_req_init(&req, hdev);
5728 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5729 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5730 &req_rssi_cp);
5732 /* For LE links TX power does not change thus we don't need to
5733 * query for it once value is known.
5735 if (!bdaddr_type_is_le(cp->addr.type) ||
5736 conn->tx_power == HCI_TX_POWER_INVALID) {
5737 req_txp_cp.handle = cpu_to_le16(conn->handle);
5738 req_txp_cp.type = 0x00;
5739 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5740 sizeof(req_txp_cp), &req_txp_cp);
5743 /* Max TX power needs to be read only once per connection */
5744 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5745 req_txp_cp.handle = cpu_to_le16(conn->handle);
5746 req_txp_cp.type = 0x01;
5747 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5748 sizeof(req_txp_cp), &req_txp_cp);
5751 err = hci_req_run(&req, conn_info_refresh_complete);
5752 if (err < 0)
5753 goto unlock;
5755 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5756 data, len);
5757 if (!cmd) {
5758 err = -ENOMEM;
5759 goto unlock;
5762 hci_conn_hold(conn);
5763 cmd->user_data = hci_conn_get(conn);
5764 cmd->cmd_complete = conn_info_cmd_complete;
5766 conn->conn_info_timestamp = jiffies;
5767 } else {
5768 /* Cache is valid, just reply with values cached in hci_conn */
5769 rp.rssi = conn->rssi;
5770 rp.tx_power = conn->tx_power;
5771 rp.max_tx_power = conn->max_tx_power;
5773 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5774 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5777 unlock:
5778 hci_dev_unlock(hdev);
5779 return err;
5782 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5784 struct hci_conn *conn = cmd->user_data;
5785 struct mgmt_rp_get_clock_info rp;
5786 struct hci_dev *hdev;
5787 int err;
5789 memset(&rp, 0, sizeof(rp));
5790 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5792 if (status)
5793 goto complete;
5795 hdev = hci_dev_get(cmd->index);
5796 if (hdev) {
5797 rp.local_clock = cpu_to_le32(hdev->clock);
5798 hci_dev_put(hdev);
5801 if (conn) {
5802 rp.piconet_clock = cpu_to_le32(conn->clock);
5803 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5806 complete:
5807 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5808 sizeof(rp));
5810 if (conn) {
5811 hci_conn_drop(conn);
5812 hci_conn_put(conn);
5815 return err;
5818 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5820 struct hci_cp_read_clock *hci_cp;
5821 struct mgmt_pending_cmd *cmd;
5822 struct hci_conn *conn;
5824 BT_DBG("%s status %u", hdev->name, status);
5826 hci_dev_lock(hdev);
5828 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5829 if (!hci_cp)
5830 goto unlock;
5832 if (hci_cp->which) {
5833 u16 handle = __le16_to_cpu(hci_cp->handle);
5834 conn = hci_conn_hash_lookup_handle(hdev, handle);
5835 } else {
5836 conn = NULL;
5839 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5840 if (!cmd)
5841 goto unlock;
5843 cmd->cmd_complete(cmd, mgmt_status(status));
5844 mgmt_pending_remove(cmd);
5846 unlock:
5847 hci_dev_unlock(hdev);
5850 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5851 u16 len)
5853 struct mgmt_cp_get_clock_info *cp = data;
5854 struct mgmt_rp_get_clock_info rp;
5855 struct hci_cp_read_clock hci_cp;
5856 struct mgmt_pending_cmd *cmd;
5857 struct hci_request req;
5858 struct hci_conn *conn;
5859 int err;
5861 BT_DBG("%s", hdev->name);
5863 memset(&rp, 0, sizeof(rp));
5864 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5865 rp.addr.type = cp->addr.type;
5867 if (cp->addr.type != BDADDR_BREDR)
5868 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5869 MGMT_STATUS_INVALID_PARAMS,
5870 &rp, sizeof(rp));
5872 hci_dev_lock(hdev);
5874 if (!hdev_is_powered(hdev)) {
5875 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5876 MGMT_STATUS_NOT_POWERED, &rp,
5877 sizeof(rp));
5878 goto unlock;
5881 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5882 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5883 &cp->addr.bdaddr);
5884 if (!conn || conn->state != BT_CONNECTED) {
5885 err = mgmt_cmd_complete(sk, hdev->id,
5886 MGMT_OP_GET_CLOCK_INFO,
5887 MGMT_STATUS_NOT_CONNECTED,
5888 &rp, sizeof(rp));
5889 goto unlock;
5891 } else {
5892 conn = NULL;
5895 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5896 if (!cmd) {
5897 err = -ENOMEM;
5898 goto unlock;
5901 cmd->cmd_complete = clock_info_cmd_complete;
5903 hci_req_init(&req, hdev);
5905 memset(&hci_cp, 0, sizeof(hci_cp));
5906 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5908 if (conn) {
5909 hci_conn_hold(conn);
5910 cmd->user_data = hci_conn_get(conn);
5912 hci_cp.handle = cpu_to_le16(conn->handle);
5913 hci_cp.which = 0x01; /* Piconet clock */
5914 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5917 err = hci_req_run(&req, get_clock_info_complete);
5918 if (err < 0)
5919 mgmt_pending_remove(cmd);
5921 unlock:
5922 hci_dev_unlock(hdev);
5923 return err;
5926 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5928 struct hci_conn *conn;
5930 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5931 if (!conn)
5932 return false;
5934 if (conn->dst_type != type)
5935 return false;
5937 if (conn->state != BT_CONNECTED)
5938 return false;
5940 return true;
5943 /* This function requires the caller holds hdev->lock */
5944 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5945 u8 addr_type, u8 auto_connect)
5947 struct hci_dev *hdev = req->hdev;
5948 struct hci_conn_params *params;
5950 params = hci_conn_params_add(hdev, addr, addr_type);
5951 if (!params)
5952 return -EIO;
5954 if (params->auto_connect == auto_connect)
5955 return 0;
5957 list_del_init(&params->action);
5959 switch (auto_connect) {
5960 case HCI_AUTO_CONN_DISABLED:
5961 case HCI_AUTO_CONN_LINK_LOSS:
5962 __hci_update_background_scan(req);
5963 break;
5964 case HCI_AUTO_CONN_REPORT:
5965 list_add(&params->action, &hdev->pend_le_reports);
5966 __hci_update_background_scan(req);
5967 break;
5968 case HCI_AUTO_CONN_DIRECT:
5969 case HCI_AUTO_CONN_ALWAYS:
5970 if (!is_connected(hdev, addr, addr_type)) {
5971 list_add(&params->action, &hdev->pend_le_conns);
5972 __hci_update_background_scan(req);
5974 break;
5977 params->auto_connect = auto_connect;
5979 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5980 auto_connect);
5982 return 0;
5985 static void device_added(struct sock *sk, struct hci_dev *hdev,
5986 bdaddr_t *bdaddr, u8 type, u8 action)
5988 struct mgmt_ev_device_added ev;
5990 bacpy(&ev.addr.bdaddr, bdaddr);
5991 ev.addr.type = type;
5992 ev.action = action;
5994 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5997 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5999 struct mgmt_pending_cmd *cmd;
6001 BT_DBG("status 0x%02x", status);
6003 hci_dev_lock(hdev);
6005 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
6006 if (!cmd)
6007 goto unlock;
6009 cmd->cmd_complete(cmd, mgmt_status(status));
6010 mgmt_pending_remove(cmd);
6012 unlock:
6013 hci_dev_unlock(hdev);
6016 static int add_device(struct sock *sk, struct hci_dev *hdev,
6017 void *data, u16 len)
6019 struct mgmt_cp_add_device *cp = data;
6020 struct mgmt_pending_cmd *cmd;
6021 struct hci_request req;
6022 u8 auto_conn, addr_type;
6023 int err;
6025 BT_DBG("%s", hdev->name);
6027 if (!bdaddr_type_is_valid(cp->addr.type) ||
6028 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6029 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6030 MGMT_STATUS_INVALID_PARAMS,
6031 &cp->addr, sizeof(cp->addr));
6033 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6034 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6035 MGMT_STATUS_INVALID_PARAMS,
6036 &cp->addr, sizeof(cp->addr));
6038 hci_req_init(&req, hdev);
6040 hci_dev_lock(hdev);
6042 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
6043 if (!cmd) {
6044 err = -ENOMEM;
6045 goto unlock;
6048 cmd->cmd_complete = addr_cmd_complete;
6050 if (cp->addr.type == BDADDR_BREDR) {
6051 /* Only incoming connections action is supported for now */
6052 if (cp->action != 0x01) {
6053 err = cmd->cmd_complete(cmd,
6054 MGMT_STATUS_INVALID_PARAMS);
6055 mgmt_pending_remove(cmd);
6056 goto unlock;
6059 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
6060 cp->addr.type);
6061 if (err)
6062 goto unlock;
6064 __hci_update_page_scan(&req);
6066 goto added;
6069 if (cp->addr.type == BDADDR_LE_PUBLIC)
6070 addr_type = ADDR_LE_DEV_PUBLIC;
6071 else
6072 addr_type = ADDR_LE_DEV_RANDOM;
6074 if (cp->action == 0x02)
6075 auto_conn = HCI_AUTO_CONN_ALWAYS;
6076 else if (cp->action == 0x01)
6077 auto_conn = HCI_AUTO_CONN_DIRECT;
6078 else
6079 auto_conn = HCI_AUTO_CONN_REPORT;
6081 /* If the connection parameters don't exist for this device,
6082 * they will be created and configured with defaults.
6084 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6085 auto_conn) < 0) {
6086 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6087 mgmt_pending_remove(cmd);
6088 goto unlock;
6091 added:
6092 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6094 err = hci_req_run(&req, add_device_complete);
6095 if (err < 0) {
6096 /* ENODATA means no HCI commands were needed (e.g. if
6097 * the adapter is powered off).
6099 if (err == -ENODATA)
6100 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6101 mgmt_pending_remove(cmd);
6104 unlock:
6105 hci_dev_unlock(hdev);
6106 return err;
6109 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6110 bdaddr_t *bdaddr, u8 type)
6112 struct mgmt_ev_device_removed ev;
6114 bacpy(&ev.addr.bdaddr, bdaddr);
6115 ev.addr.type = type;
6117 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6120 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6122 struct mgmt_pending_cmd *cmd;
6124 BT_DBG("status 0x%02x", status);
6126 hci_dev_lock(hdev);
6128 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6129 if (!cmd)
6130 goto unlock;
6132 cmd->cmd_complete(cmd, mgmt_status(status));
6133 mgmt_pending_remove(cmd);
6135 unlock:
6136 hci_dev_unlock(hdev);
6139 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6140 void *data, u16 len)
6142 struct mgmt_cp_remove_device *cp = data;
6143 struct mgmt_pending_cmd *cmd;
6144 struct hci_request req;
6145 int err;
6147 BT_DBG("%s", hdev->name);
6149 hci_req_init(&req, hdev);
6151 hci_dev_lock(hdev);
6153 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6154 if (!cmd) {
6155 err = -ENOMEM;
6156 goto unlock;
6159 cmd->cmd_complete = addr_cmd_complete;
6161 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6162 struct hci_conn_params *params;
6163 u8 addr_type;
6165 if (!bdaddr_type_is_valid(cp->addr.type)) {
6166 err = cmd->cmd_complete(cmd,
6167 MGMT_STATUS_INVALID_PARAMS);
6168 mgmt_pending_remove(cmd);
6169 goto unlock;
6172 if (cp->addr.type == BDADDR_BREDR) {
6173 err = hci_bdaddr_list_del(&hdev->whitelist,
6174 &cp->addr.bdaddr,
6175 cp->addr.type);
6176 if (err) {
6177 err = cmd->cmd_complete(cmd,
6178 MGMT_STATUS_INVALID_PARAMS);
6179 mgmt_pending_remove(cmd);
6180 goto unlock;
6183 __hci_update_page_scan(&req);
6185 device_removed(sk, hdev, &cp->addr.bdaddr,
6186 cp->addr.type);
6187 goto complete;
6190 if (cp->addr.type == BDADDR_LE_PUBLIC)
6191 addr_type = ADDR_LE_DEV_PUBLIC;
6192 else
6193 addr_type = ADDR_LE_DEV_RANDOM;
6195 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6196 addr_type);
6197 if (!params) {
6198 err = cmd->cmd_complete(cmd,
6199 MGMT_STATUS_INVALID_PARAMS);
6200 mgmt_pending_remove(cmd);
6201 goto unlock;
6204 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6205 err = cmd->cmd_complete(cmd,
6206 MGMT_STATUS_INVALID_PARAMS);
6207 mgmt_pending_remove(cmd);
6208 goto unlock;
6211 list_del(&params->action);
6212 list_del(&params->list);
6213 kfree(params);
6214 __hci_update_background_scan(&req);
6216 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6217 } else {
6218 struct hci_conn_params *p, *tmp;
6219 struct bdaddr_list *b, *btmp;
6221 if (cp->addr.type) {
6222 err = cmd->cmd_complete(cmd,
6223 MGMT_STATUS_INVALID_PARAMS);
6224 mgmt_pending_remove(cmd);
6225 goto unlock;
6228 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6229 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6230 list_del(&b->list);
6231 kfree(b);
6234 __hci_update_page_scan(&req);
6236 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6237 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6238 continue;
6239 device_removed(sk, hdev, &p->addr, p->addr_type);
6240 list_del(&p->action);
6241 list_del(&p->list);
6242 kfree(p);
6245 BT_DBG("All LE connection parameters were removed");
6247 __hci_update_background_scan(&req);
6250 complete:
6251 err = hci_req_run(&req, remove_device_complete);
6252 if (err < 0) {
6253 /* ENODATA means no HCI commands were needed (e.g. if
6254 * the adapter is powered off).
6256 if (err == -ENODATA)
6257 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6258 mgmt_pending_remove(cmd);
6261 unlock:
6262 hci_dev_unlock(hdev);
6263 return err;
6266 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6267 u16 len)
6269 struct mgmt_cp_load_conn_param *cp = data;
6270 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6271 sizeof(struct mgmt_conn_param));
6272 u16 param_count, expected_len;
6273 int i;
6275 if (!lmp_le_capable(hdev))
6276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6277 MGMT_STATUS_NOT_SUPPORTED);
6279 param_count = __le16_to_cpu(cp->param_count);
6280 if (param_count > max_param_count) {
6281 BT_ERR("load_conn_param: too big param_count value %u",
6282 param_count);
6283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6284 MGMT_STATUS_INVALID_PARAMS);
6287 expected_len = sizeof(*cp) + param_count *
6288 sizeof(struct mgmt_conn_param);
6289 if (expected_len != len) {
6290 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6291 expected_len, len);
6292 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6293 MGMT_STATUS_INVALID_PARAMS);
6296 BT_DBG("%s param_count %u", hdev->name, param_count);
6298 hci_dev_lock(hdev);
6300 hci_conn_params_clear_disabled(hdev);
6302 for (i = 0; i < param_count; i++) {
6303 struct mgmt_conn_param *param = &cp->params[i];
6304 struct hci_conn_params *hci_param;
6305 u16 min, max, latency, timeout;
6306 u8 addr_type;
6308 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
6309 param->addr.type);
6311 if (param->addr.type == BDADDR_LE_PUBLIC) {
6312 addr_type = ADDR_LE_DEV_PUBLIC;
6313 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6314 addr_type = ADDR_LE_DEV_RANDOM;
6315 } else {
6316 BT_ERR("Ignoring invalid connection parameters");
6317 continue;
6320 min = le16_to_cpu(param->min_interval);
6321 max = le16_to_cpu(param->max_interval);
6322 latency = le16_to_cpu(param->latency);
6323 timeout = le16_to_cpu(param->timeout);
6325 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6326 min, max, latency, timeout);
6328 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6329 BT_ERR("Ignoring invalid connection parameters");
6330 continue;
6333 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6334 addr_type);
6335 if (!hci_param) {
6336 BT_ERR("Failed to add connection parameters");
6337 continue;
6340 hci_param->conn_min_interval = min;
6341 hci_param->conn_max_interval = max;
6342 hci_param->conn_latency = latency;
6343 hci_param->supervision_timeout = timeout;
6346 hci_dev_unlock(hdev);
6348 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
6349 NULL, 0);
6352 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6353 void *data, u16 len)
6355 struct mgmt_cp_set_external_config *cp = data;
6356 bool changed;
6357 int err;
6359 BT_DBG("%s", hdev->name);
6361 if (hdev_is_powered(hdev))
6362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6363 MGMT_STATUS_REJECTED);
6365 if (cp->config != 0x00 && cp->config != 0x01)
6366 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6367 MGMT_STATUS_INVALID_PARAMS);
6369 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6370 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6371 MGMT_STATUS_NOT_SUPPORTED);
6373 hci_dev_lock(hdev);
6375 if (cp->config)
6376 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6377 else
6378 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6380 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6381 if (err < 0)
6382 goto unlock;
6384 if (!changed)
6385 goto unlock;
6387 err = new_options(hdev, sk);
6389 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6390 mgmt_index_removed(hdev);
6392 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6393 hci_dev_set_flag(hdev, HCI_CONFIG);
6394 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6396 queue_work(hdev->req_workqueue, &hdev->power_on);
6397 } else {
6398 set_bit(HCI_RAW, &hdev->flags);
6399 mgmt_index_added(hdev);
6403 unlock:
6404 hci_dev_unlock(hdev);
6405 return err;
6408 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6409 void *data, u16 len)
6411 struct mgmt_cp_set_public_address *cp = data;
6412 bool changed;
6413 int err;
6415 BT_DBG("%s", hdev->name);
6417 if (hdev_is_powered(hdev))
6418 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6419 MGMT_STATUS_REJECTED);
6421 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6422 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6423 MGMT_STATUS_INVALID_PARAMS);
6425 if (!hdev->set_bdaddr)
6426 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6427 MGMT_STATUS_NOT_SUPPORTED);
6429 hci_dev_lock(hdev);
6431 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6432 bacpy(&hdev->public_addr, &cp->bdaddr);
6434 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6435 if (err < 0)
6436 goto unlock;
6438 if (!changed)
6439 goto unlock;
6441 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6442 err = new_options(hdev, sk);
6444 if (is_configured(hdev)) {
6445 mgmt_index_removed(hdev);
6447 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6449 hci_dev_set_flag(hdev, HCI_CONFIG);
6450 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6452 queue_work(hdev->req_workqueue, &hdev->power_on);
6455 unlock:
6456 hci_dev_unlock(hdev);
6457 return err;
6460 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6461 u8 data_len)
6463 eir[eir_len++] = sizeof(type) + data_len;
6464 eir[eir_len++] = type;
6465 memcpy(&eir[eir_len], data, data_len);
6466 eir_len += data_len;
6468 return eir_len;
6471 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6472 u16 opcode, struct sk_buff *skb)
6474 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6475 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6476 u8 *h192, *r192, *h256, *r256;
6477 struct mgmt_pending_cmd *cmd;
6478 u16 eir_len;
6479 int err;
6481 BT_DBG("%s status %u", hdev->name, status);
6483 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6484 if (!cmd)
6485 return;
6487 mgmt_cp = cmd->param;
6489 if (status) {
6490 status = mgmt_status(status);
6491 eir_len = 0;
6493 h192 = NULL;
6494 r192 = NULL;
6495 h256 = NULL;
6496 r256 = NULL;
6497 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6498 struct hci_rp_read_local_oob_data *rp;
6500 if (skb->len != sizeof(*rp)) {
6501 status = MGMT_STATUS_FAILED;
6502 eir_len = 0;
6503 } else {
6504 status = MGMT_STATUS_SUCCESS;
6505 rp = (void *)skb->data;
6507 eir_len = 5 + 18 + 18;
6508 h192 = rp->hash;
6509 r192 = rp->rand;
6510 h256 = NULL;
6511 r256 = NULL;
6513 } else {
6514 struct hci_rp_read_local_oob_ext_data *rp;
6516 if (skb->len != sizeof(*rp)) {
6517 status = MGMT_STATUS_FAILED;
6518 eir_len = 0;
6519 } else {
6520 status = MGMT_STATUS_SUCCESS;
6521 rp = (void *)skb->data;
6523 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6524 eir_len = 5 + 18 + 18;
6525 h192 = NULL;
6526 r192 = NULL;
6527 } else {
6528 eir_len = 5 + 18 + 18 + 18 + 18;
6529 h192 = rp->hash192;
6530 r192 = rp->rand192;
6533 h256 = rp->hash256;
6534 r256 = rp->rand256;
6538 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6539 if (!mgmt_rp)
6540 goto done;
6542 if (status)
6543 goto send_rsp;
6545 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6546 hdev->dev_class, 3);
6548 if (h192 && r192) {
6549 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6550 EIR_SSP_HASH_C192, h192, 16);
6551 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6552 EIR_SSP_RAND_R192, r192, 16);
6555 if (h256 && r256) {
6556 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6557 EIR_SSP_HASH_C256, h256, 16);
6558 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6559 EIR_SSP_RAND_R256, r256, 16);
6562 send_rsp:
6563 mgmt_rp->type = mgmt_cp->type;
6564 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6566 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6567 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6568 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6569 if (err < 0 || status)
6570 goto done;
6572 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6574 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6575 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6576 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6577 done:
6578 kfree(mgmt_rp);
6579 mgmt_pending_remove(cmd);
6582 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6583 struct mgmt_cp_read_local_oob_ext_data *cp)
6585 struct mgmt_pending_cmd *cmd;
6586 struct hci_request req;
6587 int err;
6589 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6590 cp, sizeof(*cp));
6591 if (!cmd)
6592 return -ENOMEM;
6594 hci_req_init(&req, hdev);
6596 if (bredr_sc_enabled(hdev))
6597 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6598 else
6599 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6601 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6602 if (err < 0) {
6603 mgmt_pending_remove(cmd);
6604 return err;
6607 return 0;
6610 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6611 void *data, u16 data_len)
6613 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6614 struct mgmt_rp_read_local_oob_ext_data *rp;
6615 size_t rp_len;
6616 u16 eir_len;
6617 u8 status, flags, role, addr[7], hash[16], rand[16];
6618 int err;
6620 BT_DBG("%s", hdev->name);
6622 if (hdev_is_powered(hdev)) {
6623 switch (cp->type) {
6624 case BIT(BDADDR_BREDR):
6625 status = mgmt_bredr_support(hdev);
6626 if (status)
6627 eir_len = 0;
6628 else
6629 eir_len = 5;
6630 break;
6631 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6632 status = mgmt_le_support(hdev);
6633 if (status)
6634 eir_len = 0;
6635 else
6636 eir_len = 9 + 3 + 18 + 18 + 3;
6637 break;
6638 default:
6639 status = MGMT_STATUS_INVALID_PARAMS;
6640 eir_len = 0;
6641 break;
6643 } else {
6644 status = MGMT_STATUS_NOT_POWERED;
6645 eir_len = 0;
6648 rp_len = sizeof(*rp) + eir_len;
6649 rp = kmalloc(rp_len, GFP_ATOMIC);
6650 if (!rp)
6651 return -ENOMEM;
6653 if (status)
6654 goto complete;
6656 hci_dev_lock(hdev);
6658 eir_len = 0;
6659 switch (cp->type) {
6660 case BIT(BDADDR_BREDR):
6661 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6662 err = read_local_ssp_oob_req(hdev, sk, cp);
6663 hci_dev_unlock(hdev);
6664 if (!err)
6665 goto done;
6667 status = MGMT_STATUS_FAILED;
6668 goto complete;
6669 } else {
6670 eir_len = eir_append_data(rp->eir, eir_len,
6671 EIR_CLASS_OF_DEV,
6672 hdev->dev_class, 3);
6674 break;
6675 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6676 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6677 smp_generate_oob(hdev, hash, rand) < 0) {
6678 hci_dev_unlock(hdev);
6679 status = MGMT_STATUS_FAILED;
6680 goto complete;
6683 /* This should return the active RPA, but since the RPA
6684 * is only programmed on demand, it is really hard to fill
6685 * this in at the moment. For now disallow retrieving
6686 * local out-of-band data when privacy is in use.
6688 * Returning the identity address will not help here since
6689 * pairing happens before the identity resolving key is
6690 * known and thus the connection establishment happens
6691 * based on the RPA and not the identity address.
6693 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6694 hci_dev_unlock(hdev);
6695 status = MGMT_STATUS_REJECTED;
6696 goto complete;
6699 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6700 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6701 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6702 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6703 memcpy(addr, &hdev->static_addr, 6);
6704 addr[6] = 0x01;
6705 } else {
6706 memcpy(addr, &hdev->bdaddr, 6);
6707 addr[6] = 0x00;
6710 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6711 addr, sizeof(addr));
6713 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6714 role = 0x02;
6715 else
6716 role = 0x01;
6718 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6719 &role, sizeof(role));
6721 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6722 eir_len = eir_append_data(rp->eir, eir_len,
6723 EIR_LE_SC_CONFIRM,
6724 hash, sizeof(hash));
6726 eir_len = eir_append_data(rp->eir, eir_len,
6727 EIR_LE_SC_RANDOM,
6728 rand, sizeof(rand));
6731 flags = get_adv_discov_flags(hdev);
6733 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6734 flags |= LE_AD_NO_BREDR;
6736 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6737 &flags, sizeof(flags));
6738 break;
6741 hci_dev_unlock(hdev);
6743 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6745 status = MGMT_STATUS_SUCCESS;
6747 complete:
6748 rp->type = cp->type;
6749 rp->eir_len = cpu_to_le16(eir_len);
6751 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6752 status, rp, sizeof(*rp) + eir_len);
6753 if (err < 0 || status)
6754 goto done;
6756 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6757 rp, sizeof(*rp) + eir_len,
6758 HCI_MGMT_OOB_DATA_EVENTS, sk);
6760 done:
6761 kfree(rp);
6763 return err;
6766 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6768 u32 flags = 0;
6770 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6771 flags |= MGMT_ADV_FLAG_DISCOV;
6772 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6773 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6775 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6776 flags |= MGMT_ADV_FLAG_TX_POWER;
6778 return flags;
6781 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6782 void *data, u16 data_len)
6784 struct mgmt_rp_read_adv_features *rp;
6785 size_t rp_len;
6786 int err;
6787 bool instance;
6788 u32 supported_flags;
6790 BT_DBG("%s", hdev->name);
6792 if (!lmp_le_capable(hdev))
6793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6794 MGMT_STATUS_REJECTED);
6796 hci_dev_lock(hdev);
6798 rp_len = sizeof(*rp);
6800 /* Currently only one instance is supported, so just add 1 to the
6801 * response length.
6803 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6804 if (instance)
6805 rp_len++;
6807 rp = kmalloc(rp_len, GFP_ATOMIC);
6808 if (!rp) {
6809 hci_dev_unlock(hdev);
6810 return -ENOMEM;
6813 supported_flags = get_supported_adv_flags(hdev);
6815 rp->supported_flags = cpu_to_le32(supported_flags);
6816 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6817 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6818 rp->max_instances = 1;
6820 /* Currently only one instance is supported, so simply return the
6821 * current instance number.
6823 if (instance) {
6824 rp->num_instances = 1;
6825 rp->instance[0] = 1;
6826 } else {
6827 rp->num_instances = 0;
6830 hci_dev_unlock(hdev);
6832 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6833 MGMT_STATUS_SUCCESS, rp, rp_len);
6835 kfree(rp);
6837 return err;
6840 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6841 u8 len, bool is_adv_data)
6843 u8 max_len = HCI_MAX_AD_LENGTH;
6844 int i, cur_len;
6845 bool flags_managed = false;
6846 bool tx_power_managed = false;
6847 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6848 MGMT_ADV_FLAG_MANAGED_FLAGS;
6850 if (is_adv_data && (adv_flags & flags_params)) {
6851 flags_managed = true;
6852 max_len -= 3;
6855 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6856 tx_power_managed = true;
6857 max_len -= 3;
6860 if (len > max_len)
6861 return false;
6863 /* Make sure that the data is correctly formatted. */
6864 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6865 cur_len = data[i];
6867 if (flags_managed && data[i + 1] == EIR_FLAGS)
6868 return false;
6870 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6871 return false;
6873 /* If the current field length would exceed the total data
6874 * length, then it's invalid.
6876 if (i + cur_len >= len)
6877 return false;
6880 return true;
6883 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6884 u16 opcode)
6886 struct mgmt_pending_cmd *cmd;
6887 struct mgmt_rp_add_advertising rp;
6889 BT_DBG("status %d", status);
6891 hci_dev_lock(hdev);
6893 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6895 if (status) {
6896 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6897 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6898 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6901 if (!cmd)
6902 goto unlock;
6904 rp.instance = 0x01;
6906 if (status)
6907 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6908 mgmt_status(status));
6909 else
6910 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6911 mgmt_status(status), &rp, sizeof(rp));
6913 mgmt_pending_remove(cmd);
6915 unlock:
6916 hci_dev_unlock(hdev);
6919 static void adv_timeout_expired(struct work_struct *work)
6921 struct hci_dev *hdev = container_of(work, struct hci_dev,
6922 adv_instance.timeout_exp.work);
6924 hdev->adv_instance.timeout = 0;
6926 hci_dev_lock(hdev);
6927 clear_adv_instance(hdev);
6928 hci_dev_unlock(hdev);
6931 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6932 void *data, u16 data_len)
6934 struct mgmt_cp_add_advertising *cp = data;
6935 struct mgmt_rp_add_advertising rp;
6936 u32 flags;
6937 u32 supported_flags;
6938 u8 status;
6939 u16 timeout;
6940 int err;
6941 struct mgmt_pending_cmd *cmd;
6942 struct hci_request req;
6944 BT_DBG("%s", hdev->name);
6946 status = mgmt_le_support(hdev);
6947 if (status)
6948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6949 status);
6951 flags = __le32_to_cpu(cp->flags);
6952 timeout = __le16_to_cpu(cp->timeout);
6954 /* The current implementation only supports adding one instance and only
6955 * a subset of the specified flags.
6957 supported_flags = get_supported_adv_flags(hdev);
6958 if (cp->instance != 0x01 || (flags & ~supported_flags))
6959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6960 MGMT_STATUS_INVALID_PARAMS);
6962 hci_dev_lock(hdev);
6964 if (timeout && !hdev_is_powered(hdev)) {
6965 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6966 MGMT_STATUS_REJECTED);
6967 goto unlock;
6970 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6971 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6972 pending_find(MGMT_OP_SET_LE, hdev)) {
6973 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6974 MGMT_STATUS_BUSY);
6975 goto unlock;
6978 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6979 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6980 cp->scan_rsp_len, false)) {
6981 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6982 MGMT_STATUS_INVALID_PARAMS);
6983 goto unlock;
6986 INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);
6988 hdev->adv_instance.flags = flags;
6989 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6990 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6992 if (cp->adv_data_len)
6993 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6995 if (cp->scan_rsp_len)
6996 memcpy(hdev->adv_instance.scan_rsp_data,
6997 cp->data + cp->adv_data_len, cp->scan_rsp_len);
6999 if (hdev->adv_instance.timeout)
7000 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
7002 hdev->adv_instance.timeout = timeout;
7004 if (timeout)
7005 queue_delayed_work(hdev->workqueue,
7006 &hdev->adv_instance.timeout_exp,
7007 msecs_to_jiffies(timeout * 1000));
7009 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
7010 advertising_added(sk, hdev, 1);
7012 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7013 * we have no HCI communication to make. Simply return.
7015 if (!hdev_is_powered(hdev) ||
7016 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7017 rp.instance = 0x01;
7018 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7019 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7020 goto unlock;
7023 /* We're good to go, update advertising data, parameters, and start
7024 * advertising.
7026 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7027 data_len);
7028 if (!cmd) {
7029 err = -ENOMEM;
7030 goto unlock;
7033 hci_req_init(&req, hdev);
7035 update_adv_data(&req);
7036 update_scan_rsp_data(&req);
7037 enable_advertising(&req);
7039 err = hci_req_run(&req, add_advertising_complete);
7040 if (err < 0)
7041 mgmt_pending_remove(cmd);
7043 unlock:
7044 hci_dev_unlock(hdev);
7046 return err;
7049 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7050 u16 opcode)
7052 struct mgmt_pending_cmd *cmd;
7053 struct mgmt_rp_remove_advertising rp;
7055 BT_DBG("status %d", status);
7057 hci_dev_lock(hdev);
7059 /* A failure status here only means that we failed to disable
7060 * advertising. Otherwise, the advertising instance has been removed,
7061 * so report success.
7063 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7064 if (!cmd)
7065 goto unlock;
7067 rp.instance = 1;
7069 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7070 &rp, sizeof(rp));
7071 mgmt_pending_remove(cmd);
7073 unlock:
7074 hci_dev_unlock(hdev);
7077 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7078 void *data, u16 data_len)
7080 struct mgmt_cp_remove_advertising *cp = data;
7081 struct mgmt_rp_remove_advertising rp;
7082 int err;
7083 struct mgmt_pending_cmd *cmd;
7084 struct hci_request req;
7086 BT_DBG("%s", hdev->name);
7088 /* The current implementation only allows modifying instance no 1. A
7089 * value of 0 indicates that all instances should be cleared.
7091 if (cp->instance > 1)
7092 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7093 MGMT_STATUS_INVALID_PARAMS);
7095 hci_dev_lock(hdev);
7097 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7098 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7099 pending_find(MGMT_OP_SET_LE, hdev)) {
7100 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7101 MGMT_STATUS_BUSY);
7102 goto unlock;
7105 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
7106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7107 MGMT_STATUS_INVALID_PARAMS);
7108 goto unlock;
7111 if (hdev->adv_instance.timeout)
7112 cancel_delayed_work(&hdev->adv_instance.timeout_exp);
7114 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
7116 advertising_removed(sk, hdev, 1);
7118 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
7120 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7121 * we have no HCI communication to make. Simply return.
7123 if (!hdev_is_powered(hdev) ||
7124 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7125 rp.instance = 1;
7126 err = mgmt_cmd_complete(sk, hdev->id,
7127 MGMT_OP_REMOVE_ADVERTISING,
7128 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7129 goto unlock;
7132 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7133 data_len);
7134 if (!cmd) {
7135 err = -ENOMEM;
7136 goto unlock;
7139 hci_req_init(&req, hdev);
7140 disable_advertising(&req);
7142 err = hci_req_run(&req, remove_advertising_complete);
7143 if (err < 0)
7144 mgmt_pending_remove(cmd);
7146 unlock:
7147 hci_dev_unlock(hdev);
7149 return err;
7152 static const struct hci_mgmt_handler mgmt_handlers[] = {
7153 { NULL }, /* 0x0000 (no command) */
7154 { read_version, MGMT_READ_VERSION_SIZE,
7155 HCI_MGMT_NO_HDEV |
7156 HCI_MGMT_UNTRUSTED },
7157 { read_commands, MGMT_READ_COMMANDS_SIZE,
7158 HCI_MGMT_NO_HDEV |
7159 HCI_MGMT_UNTRUSTED },
7160 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7161 HCI_MGMT_NO_HDEV |
7162 HCI_MGMT_UNTRUSTED },
7163 { read_controller_info, MGMT_READ_INFO_SIZE,
7164 HCI_MGMT_UNTRUSTED },
7165 { set_powered, MGMT_SETTING_SIZE },
7166 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7167 { set_connectable, MGMT_SETTING_SIZE },
7168 { set_fast_connectable, MGMT_SETTING_SIZE },
7169 { set_bondable, MGMT_SETTING_SIZE },
7170 { set_link_security, MGMT_SETTING_SIZE },
7171 { set_ssp, MGMT_SETTING_SIZE },
7172 { set_hs, MGMT_SETTING_SIZE },
7173 { set_le, MGMT_SETTING_SIZE },
7174 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7175 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7176 { add_uuid, MGMT_ADD_UUID_SIZE },
7177 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7178 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7179 HCI_MGMT_VAR_LEN },
7180 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7181 HCI_MGMT_VAR_LEN },
7182 { disconnect, MGMT_DISCONNECT_SIZE },
7183 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7184 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7185 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7186 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7187 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7188 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7189 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7190 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7191 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7192 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7193 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7194 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7195 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7196 HCI_MGMT_VAR_LEN },
7197 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7198 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7199 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7200 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7201 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7202 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7203 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7204 { set_advertising, MGMT_SETTING_SIZE },
7205 { set_bredr, MGMT_SETTING_SIZE },
7206 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7207 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7208 { set_secure_conn, MGMT_SETTING_SIZE },
7209 { set_debug_keys, MGMT_SETTING_SIZE },
7210 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7211 { load_irks, MGMT_LOAD_IRKS_SIZE,
7212 HCI_MGMT_VAR_LEN },
7213 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7214 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7215 { add_device, MGMT_ADD_DEVICE_SIZE },
7216 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7217 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7218 HCI_MGMT_VAR_LEN },
7219 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7220 HCI_MGMT_NO_HDEV |
7221 HCI_MGMT_UNTRUSTED },
7222 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7223 HCI_MGMT_UNCONFIGURED |
7224 HCI_MGMT_UNTRUSTED },
7225 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7226 HCI_MGMT_UNCONFIGURED },
7227 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7228 HCI_MGMT_UNCONFIGURED },
7229 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7230 HCI_MGMT_VAR_LEN },
7231 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7232 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7233 HCI_MGMT_NO_HDEV |
7234 HCI_MGMT_UNTRUSTED },
7235 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7236 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7237 HCI_MGMT_VAR_LEN },
7238 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
7241 void mgmt_index_added(struct hci_dev *hdev)
7243 struct mgmt_ev_ext_index ev;
7245 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7246 return;
7248 switch (hdev->dev_type) {
7249 case HCI_BREDR:
7250 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7251 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7252 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7253 ev.type = 0x01;
7254 } else {
7255 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7256 HCI_MGMT_INDEX_EVENTS);
7257 ev.type = 0x00;
7259 break;
7260 case HCI_AMP:
7261 ev.type = 0x02;
7262 break;
7263 default:
7264 return;
7267 ev.bus = hdev->bus;
7269 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7270 HCI_MGMT_EXT_INDEX_EVENTS);
7273 void mgmt_index_removed(struct hci_dev *hdev)
7275 struct mgmt_ev_ext_index ev;
7276 u8 status = MGMT_STATUS_INVALID_INDEX;
7278 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7279 return;
7281 switch (hdev->dev_type) {
7282 case HCI_BREDR:
7283 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7285 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7286 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7287 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7288 ev.type = 0x01;
7289 } else {
7290 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7291 HCI_MGMT_INDEX_EVENTS);
7292 ev.type = 0x00;
7294 break;
7295 case HCI_AMP:
7296 ev.type = 0x02;
7297 break;
7298 default:
7299 return;
7302 ev.bus = hdev->bus;
7304 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7305 HCI_MGMT_EXT_INDEX_EVENTS);
7308 /* This function requires the caller holds hdev->lock */
7309 static void restart_le_actions(struct hci_request *req)
7311 struct hci_dev *hdev = req->hdev;
7312 struct hci_conn_params *p;
7314 list_for_each_entry(p, &hdev->le_conn_params, list) {
7315 /* Needed for AUTO_OFF case where might not "really"
7316 * have been powered off.
7318 list_del_init(&p->action);
7320 switch (p->auto_connect) {
7321 case HCI_AUTO_CONN_DIRECT:
7322 case HCI_AUTO_CONN_ALWAYS:
7323 list_add(&p->action, &hdev->pend_le_conns);
7324 break;
7325 case HCI_AUTO_CONN_REPORT:
7326 list_add(&p->action, &hdev->pend_le_reports);
7327 break;
7328 default:
7329 break;
7333 __hci_update_background_scan(req);
7336 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7338 struct cmd_lookup match = { NULL, hdev };
7340 BT_DBG("status 0x%02x", status);
7342 if (!status) {
7343 /* Register the available SMP channels (BR/EDR and LE) only
7344 * when successfully powering on the controller. This late
7345 * registration is required so that LE SMP can clearly
7346 * decide if the public address or static address is used.
7348 smp_register(hdev);
7351 hci_dev_lock(hdev);
7353 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7355 new_settings(hdev, match.sk);
7357 hci_dev_unlock(hdev);
7359 if (match.sk)
7360 sock_put(match.sk);
7363 static int powered_update_hci(struct hci_dev *hdev)
7365 struct hci_request req;
7366 u8 link_sec;
7368 hci_req_init(&req, hdev);
7370 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7371 !lmp_host_ssp_capable(hdev)) {
7372 u8 mode = 0x01;
7374 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7376 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7377 u8 support = 0x01;
7379 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7380 sizeof(support), &support);
7384 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7385 lmp_bredr_capable(hdev)) {
7386 struct hci_cp_write_le_host_supported cp;
7388 cp.le = 0x01;
7389 cp.simul = 0x00;
7391 /* Check first if we already have the right
7392 * host state (host features set)
7394 if (cp.le != lmp_host_le_capable(hdev) ||
7395 cp.simul != lmp_host_le_br_capable(hdev))
7396 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7397 sizeof(cp), &cp);
7400 if (lmp_le_capable(hdev)) {
7401 /* Make sure the controller has a good default for
7402 * advertising data. This also applies to the case
7403 * where BR/EDR was toggled during the AUTO_OFF phase.
7405 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7406 update_adv_data(&req);
7407 update_scan_rsp_data(&req);
7410 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7411 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7412 enable_advertising(&req);
7414 restart_le_actions(&req);
7417 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7418 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7419 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7420 sizeof(link_sec), &link_sec);
7422 if (lmp_bredr_capable(hdev)) {
7423 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7424 write_fast_connectable(&req, true);
7425 else
7426 write_fast_connectable(&req, false);
7427 __hci_update_page_scan(&req);
7428 update_class(&req);
7429 update_name(&req);
7430 update_eir(&req);
7433 return hci_req_run(&req, powered_complete);
7436 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7438 struct cmd_lookup match = { NULL, hdev };
7439 u8 status, zero_cod[] = { 0, 0, 0 };
7440 int err;
7442 if (!hci_dev_test_flag(hdev, HCI_MGMT))
7443 return 0;
7445 if (powered) {
7446 if (powered_update_hci(hdev) == 0)
7447 return 0;
7449 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7450 &match);
7451 goto new_settings;
7454 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7456 /* If the power off is because of hdev unregistration let
7457 * use the appropriate INVALID_INDEX status. Otherwise use
7458 * NOT_POWERED. We cover both scenarios here since later in
7459 * mgmt_index_removed() any hci_conn callbacks will have already
7460 * been triggered, potentially causing misleading DISCONNECTED
7461 * status responses.
7463 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7464 status = MGMT_STATUS_INVALID_INDEX;
7465 else
7466 status = MGMT_STATUS_NOT_POWERED;
7468 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7470 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7471 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7472 zero_cod, sizeof(zero_cod), NULL);
7474 new_settings:
7475 err = new_settings(hdev, match.sk);
7477 if (match.sk)
7478 sock_put(match.sk);
7480 return err;
7483 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7485 struct mgmt_pending_cmd *cmd;
7486 u8 status;
7488 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7489 if (!cmd)
7490 return;
7492 if (err == -ERFKILL)
7493 status = MGMT_STATUS_RFKILLED;
7494 else
7495 status = MGMT_STATUS_FAILED;
7497 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7499 mgmt_pending_remove(cmd);
/* Called when the discoverable timeout expires: clear the discoverable
 * state, sync the controller (scan enable, class, advertising data) and
 * emit a New Settings event.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		u8 scan = SCAN_PAGE;
		/* Drop back to page-scan only (no inquiry scan). */
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
7539 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7540 bool persistent)
7542 struct mgmt_ev_new_link_key ev;
7544 memset(&ev, 0, sizeof(ev));
7546 ev.store_hint = persistent;
7547 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7548 ev.key.addr.type = BDADDR_BREDR;
7549 ev.key.type = key->type;
7550 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7551 ev.key.pin_len = key->pin_len;
7553 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7556 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7558 switch (ltk->type) {
7559 case SMP_LTK:
7560 case SMP_LTK_SLAVE:
7561 if (ltk->authenticated)
7562 return MGMT_LTK_AUTHENTICATED;
7563 return MGMT_LTK_UNAUTHENTICATED;
7564 case SMP_LTK_P256:
7565 if (ltk->authenticated)
7566 return MGMT_LTK_P256_AUTH;
7567 return MGMT_LTK_P256_UNAUTH;
7568 case SMP_LTK_P256_DEBUG:
7569 return MGMT_LTK_P256_DEBUG;
7572 return MGMT_LTK_UNAUTHENTICATED;
7575 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7577 struct mgmt_ev_new_long_term_key ev;
7579 memset(&ev, 0, sizeof(ev));
7581 /* Devices using resolvable or non-resolvable random addresses
7582 * without providing an indentity resolving key don't require
7583 * to store long term keys. Their addresses will change the
7584 * next time around.
7586 * Only when a remote device provides an identity address
7587 * make sure the long term key is stored. If the remote
7588 * identity is known, the long term keys are internally
7589 * mapped to the identity address. So allow static random
7590 * and public addresses here.
7592 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7593 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7594 ev.store_hint = 0x00;
7595 else
7596 ev.store_hint = persistent;
7598 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7599 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7600 ev.key.type = mgmt_ltk_type(key);
7601 ev.key.enc_size = key->enc_size;
7602 ev.key.ediv = key->ediv;
7603 ev.key.rand = key->rand;
7605 if (key->type == SMP_LTK)
7606 ev.key.master = 1;
7608 memcpy(ev.key.val, key->val, sizeof(key->val));
7610 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
7613 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7615 struct mgmt_ev_new_irk ev;
7617 memset(&ev, 0, sizeof(ev));
7619 /* For identity resolving keys from devices that are already
7620 * using a public address or static random address, do not
7621 * ask for storing this key. The identity resolving key really
7622 * is only mandatory for devices using resovlable random
7623 * addresses.
7625 * Storing all identity resolving keys has the downside that
7626 * they will be also loaded on next boot of they system. More
7627 * identity resolving keys, means more time during scanning is
7628 * needed to actually resolve these addresses.
7630 if (bacmp(&irk->rpa, BDADDR_ANY))
7631 ev.store_hint = 0x01;
7632 else
7633 ev.store_hint = 0x00;
7635 bacpy(&ev.rpa, &irk->rpa);
7636 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7637 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7638 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7640 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7643 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7644 bool persistent)
7646 struct mgmt_ev_new_csrk ev;
7648 memset(&ev, 0, sizeof(ev));
7650 /* Devices using resolvable or non-resolvable random addresses
7651 * without providing an indentity resolving key don't require
7652 * to store signature resolving keys. Their addresses will change
7653 * the next time around.
7655 * Only when a remote device provides an identity address
7656 * make sure the signature resolving key is stored. So allow
7657 * static random and public addresses here.
7659 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7660 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7661 ev.store_hint = 0x00;
7662 else
7663 ev.store_hint = persistent;
7665 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7666 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7667 ev.key.type = csrk->type;
7668 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7670 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event so userspace can persist the
 * connection parameters a remote LE device requested.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only worth storing for identity addresses. */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* All multi-byte fields go out little-endian on the wire. */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
/* Emit a Device Connected event, carrying either the LE advertising
 * data or a synthesized EIR blob (name + class of device) for BR/EDR.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device when one is set. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
/* mgmt_pending_foreach() helper: complete a pending Disconnect command
 * and hand its socket (with a held reference) back through @data.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
/* mgmt_pending_foreach() helper: signal Device Unpaired and complete a
 * pending Unpair Device command. @data is the hci_dev.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7754 bool mgmt_powering_down(struct hci_dev *hdev)
7756 struct mgmt_pending_cmd *cmd;
7757 struct mgmt_mode *cp;
7759 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7760 if (!cmd)
7761 return false;
7763 cp = cmd->param;
7764 if (!cp->val)
7765 return true;
7767 return false;
7770 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7771 u8 link_type, u8 addr_type, u8 reason,
7772 bool mgmt_connected)
7774 struct mgmt_ev_device_disconnected ev;
7775 struct sock *sk = NULL;
7777 /* The connection is still in hci_conn_hash so test for 1
7778 * instead of 0 to know if this is the last one.
7780 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7781 cancel_delayed_work(&hdev->power_off);
7782 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7785 if (!mgmt_connected)
7786 return;
7788 if (link_type != ACL_LINK && link_type != LE_LINK)
7789 return;
7791 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7793 bacpy(&ev.addr.bdaddr, bdaddr);
7794 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7795 ev.reason = reason;
7797 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7799 if (sk)
7800 sock_put(sk);
7802 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7803 hdev);
7806 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7807 u8 link_type, u8 addr_type, u8 status)
7809 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7810 struct mgmt_cp_disconnect *cp;
7811 struct mgmt_pending_cmd *cmd;
7813 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7814 hdev);
7816 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7817 if (!cmd)
7818 return;
7820 cp = cmd->param;
7822 if (bacmp(bdaddr, &cp->addr.bdaddr))
7823 return;
7825 if (cp->addr.type != bdaddr_type)
7826 return;
7828 cmd->cmd_complete(cmd, mgmt_status(status));
7829 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event for an outgoing connection attempt. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
/* Ask userspace for a PIN code; @secure requests a 16-digit PIN. */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;	/* PIN pairing is BR/EDR only */
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
/* Complete a pending PIN Code Reply command with @status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Complete a pending PIN Code Negative Reply command with @status. */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Ask userspace to confirm a numeric comparison @value during pairing.
 * @confirm_hint signals whether a simple yes/no confirmation suffices.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Ask userspace to provide a passkey for the pairing with @bdaddr. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Common completion for the user pairing response commands: complete
 * the pending command identified by @opcode with @status.
 *
 * Returns 0 on success or -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
/* Completion handler for the User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
/* Completion handler for the User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
/* Completion handler for the User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
/* Completion handler for the User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
/* Notify userspace of the passkey to display during pairing.
 * @entered is the number of digits the remote side has typed so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
/* Emit an Authentication Failed event and, if a Pair Device command is
 * pending for this connection, fail it (skipping its own socket so it
 * does not receive the event twice).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8003 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
8005 struct cmd_lookup match = { NULL, hdev };
8006 bool changed;
8008 if (status) {
8009 u8 mgmt_err = mgmt_status(status);
8010 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8011 cmd_status_rsp, &mgmt_err);
8012 return;
8015 if (test_bit(HCI_AUTH, &hdev->flags))
8016 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8017 else
8018 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8020 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8021 &match);
8023 if (changed)
8024 new_settings(hdev, match.sk);
8026 if (match.sk)
8027 sock_put(match.sk);
/* Queue a Write EIR command that wipes the extended inquiry response,
 * clearing the cached copy in hdev as well. No-op for controllers
 * without EIR support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
/* Completion of a Write SSP Mode command: sync the SSP (and dependent
 * High Speed) mgmt flags, answer pending Set SSP commands and update
 * the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls the flag back; HS depends on SSP
		 * and must be cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables HS; report a settings
		 * change if either flag was actually cleared.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8098 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8100 struct cmd_lookup *match = data;
8102 if (match->sk == NULL) {
8103 match->sk = cmd->sk;
8104 sock_hold(match->sk);
/* Completion of a class-of-device update: emit the Class Of Device
 * Changed event (skipping the originator's socket) on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have triggered the update. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
/* Completion of a local name update: cache the name when it did not
 * originate from userspace, and emit Local Name Changed (suppressed
 * during power-on).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change did not come through mgmt: keep our copy
		 * in sync with the controller.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
8152 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8154 int i;
8156 for (i = 0; i < uuid_count; i++) {
8157 if (!memcmp(uuid, uuids[i], 16))
8158 return true;
8161 return false;
/* Walk the EIR/advertising data in @eir and return true as soon as one
 * of its advertised service UUIDs (16-, 32- or 128-bit, complete or
 * incomplete lists) matches an entry in @uuids. Shorter UUIDs are
 * expanded with the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs: little-endian on the wire, placed
			 * into bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs go into bytes 12-15 of the base. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs can be compared directly. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
/* Schedule a delayed restart of an ongoing LE scan (used with the
 * strict duplicate filter quirk to pick up fresh RSSI values).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't bother when the scan would end before the delayed
	 * restart could fire.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
/* Apply the active service discovery filter (RSSI threshold and/or
 * UUID list) to a scan result. Returns true when the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
/* Report a discovered device to userspace via the Device Found event,
 * applying the discovery filters and assembling advertising/EIR data,
 * class of device and scan response into a single EIR blob.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Only append the class of device when the EIR doesn't already
	 * carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
/* Report a resolved remote name as a Device Found event whose EIR data
 * carries only a Complete Local Name field.
 * NOTE(review): name_len is trusted to fit HCI_MAX_NAME_LENGTH — the
 * buffer is sized for that; callers appear to guarantee it (confirm).
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
/* Emit a Discovering event reflecting the current discovery type and
 * the new on/off state.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
/* hci_req_run() callback for re-enabling advertising: log only. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
/* Re-enable advertising (after e.g. a connection consumed the
 * advertising set), but only if any advertising mode is configured.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
/* The mgmt control channel, registered with the HCI socket layer at
 * module init and torn down at exit.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
/* Register the mgmt control channel. Returns 0 or a negative error. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
/* Unregister the mgmt control channel on module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}