Btrfs: fix list transaction->pending_ordered corruption
[linux/fpc-iii.git] / net / bluetooth / mgmt.c
blobefb71b022ab6520527b3087dbf917ddcb925438e
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "smp.h"
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
/* Mgmt opcodes implemented by this interface; the list is reported
 * verbatim to user space by read_commands().
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_READ_INFO,
43 MGMT_OP_SET_POWERED,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_BONDABLE,
48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS,
51 MGMT_OP_SET_LE,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_ADD_UUID,
55 MGMT_OP_REMOVE_UUID,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_DISCONNECT,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_PAIR_DEVICE,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_CONFIRM_NAME,
76 MGMT_OP_BLOCK_DEVICE,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_BREDR,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
85 MGMT_OP_SET_PRIVACY,
86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
89 MGMT_OP_ADD_DEVICE,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
/* Mgmt events this interface may emit; advertised alongside
 * mgmt_commands in the read_commands() reply.
 */
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
100 MGMT_EV_INDEX_ADDED,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
113 MGMT_EV_AUTH_FAILED,
114 MGMT_EV_DEVICE_FOUND,
115 MGMT_EV_DISCOVERING,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
120 MGMT_EV_NEW_IRK,
121 MGMT_EV_NEW_CSRK,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* One outstanding mgmt command.  Entries are created by
 * mgmt_pending_add() and live on hdev->mgmt_pending until removed by
 * mgmt_pending_remove().
 */
132 struct pending_cmd {
133 struct list_head list; /* entry on hdev->mgmt_pending */
134 u16 opcode; /* MGMT_OP_* this entry is waiting on */
135 int index; /* controller index (set from hdev->id) */
136 void *param; /* kmalloc'd copy of the command parameters */
137 struct sock *sk; /* originating socket; reference held via sock_hold() */
138 void *user_data; /* opaque context, matched by mgmt_pending_find_data() */
141 /* HCI to MGMT error code conversion table */
142 static u8 mgmt_status_table[] = {
143 MGMT_STATUS_SUCCESS,
144 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
145 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
146 MGMT_STATUS_FAILED, /* Hardware Failure */
147 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
148 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
149 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
150 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
151 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
152 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
153 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
154 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
155 MGMT_STATUS_BUSY, /* Command Disallowed */
156 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
157 MGMT_STATUS_REJECTED, /* Rejected Security */
158 MGMT_STATUS_REJECTED, /* Rejected Personal */
159 MGMT_STATUS_TIMEOUT, /* Host Timeout */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
161 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
162 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
163 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
164 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
165 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
166 MGMT_STATUS_BUSY, /* Repeated Attempts */
167 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
168 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
169 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
170 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
171 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
172 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
173 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
174 MGMT_STATUS_FAILED, /* Unspecified Error */
175 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
176 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
177 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
178 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
179 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
180 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
181 MGMT_STATUS_FAILED, /* Unit Link Key Used */
182 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
183 MGMT_STATUS_TIMEOUT, /* Instant Passed */
184 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
185 MGMT_STATUS_FAILED, /* Transaction Collision */
186 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
187 MGMT_STATUS_REJECTED, /* QoS Rejected */
188 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
189 MGMT_STATUS_REJECTED, /* Insufficient Security */
190 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
191 MGMT_STATUS_BUSY, /* Role Switch Pending */
192 MGMT_STATUS_FAILED, /* Slot Violation */
193 MGMT_STATUS_FAILED, /* Role Switch Failed */
194 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
195 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
196 MGMT_STATUS_BUSY, /* Host Busy Pairing */
197 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
198 MGMT_STATUS_BUSY, /* Controller Busy */
199 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
200 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
201 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
202 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
203 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
206 static u8 mgmt_status(u8 hci_status)
208 if (hci_status < ARRAY_SIZE(mgmt_status_table))
209 return mgmt_status_table[hci_status];
211 return MGMT_STATUS_FAILED;
/* Build a mgmt event packet and broadcast it to all mgmt control
 * sockets except @skip_sk (may be NULL).  A NULL @hdev addresses the
 * event to MGMT_INDEX_NONE.  Returns 0 on success or -ENOMEM.
 */
214 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
215 struct sock *skip_sk)
217 struct sk_buff *skb;
218 struct mgmt_hdr *hdr;
220 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
221 if (!skb)
222 return -ENOMEM;
224 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = cpu_to_le16(event);
226 if (hdev)
227 hdr->index = cpu_to_le16(hdev->id);
228 else
229 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
230 hdr->len = cpu_to_le16(data_len);
232 if (data)
233 memcpy(skb_put(skb, data_len), data, data_len);
235 /* Time stamp */
236 __net_timestamp(skb);
238 hci_send_to_control(skb, skip_sk);
239 kfree_skb(skb);
241 return 0;
/* Queue an MGMT_EV_CMD_STATUS event (carrying @status for command
 * @cmd on controller @index) on the requesting socket's receive queue.
 * The skb is freed here only if queueing fails.
 */
244 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
246 struct sk_buff *skb;
247 struct mgmt_hdr *hdr;
248 struct mgmt_ev_cmd_status *ev;
249 int err;
251 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
253 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
254 if (!skb)
255 return -ENOMEM;
257 hdr = (void *) skb_put(skb, sizeof(*hdr));
259 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
260 hdr->index = cpu_to_le16(index);
261 hdr->len = cpu_to_le16(sizeof(*ev));
263 ev = (void *) skb_put(skb, sizeof(*ev));
264 ev->status = status;
265 ev->opcode = cpu_to_le16(cmd);
267 err = sock_queue_rcv_skb(sk, skb);
268 if (err < 0)
269 kfree_skb(skb);
271 return err;
/* Queue an MGMT_EV_CMD_COMPLETE event for command @cmd with an
 * optional @rp_len-byte response payload @rp appended after the event
 * header.  The skb is freed here only if queueing fails.
 */
274 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
275 void *rp, size_t rp_len)
277 struct sk_buff *skb;
278 struct mgmt_hdr *hdr;
279 struct mgmt_ev_cmd_complete *ev;
280 int err;
282 BT_DBG("sock %p", sk);
284 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
285 if (!skb)
286 return -ENOMEM;
288 hdr = (void *) skb_put(skb, sizeof(*hdr));
290 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
291 hdr->index = cpu_to_le16(index);
292 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
294 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
295 ev->opcode = cpu_to_le16(cmd);
296 ev->status = status;
298 if (rp)
299 memcpy(ev->data, rp, rp_len);
301 err = sock_queue_rcv_skb(sk, skb);
302 if (err < 0)
303 kfree_skb(skb);
305 return err;
308 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
309 u16 data_len)
311 struct mgmt_rp_read_version rp;
313 BT_DBG("sock %p", sk);
315 rp.version = MGMT_VERSION;
316 rp.revision = cpu_to_le16(MGMT_REVISION);
318 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
319 sizeof(rp));
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command
 * opcodes followed by the supported event opcodes, each encoded as an
 * unaligned little-endian u16.
 */
322 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
323 u16 data_len)
325 struct mgmt_rp_read_commands *rp;
326 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
327 const u16 num_events = ARRAY_SIZE(mgmt_events);
328 __le16 *opcode;
329 size_t rp_size;
330 int i, err;
332 BT_DBG("sock %p", sk);
334 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
336 rp = kmalloc(rp_size, GFP_KERNEL);
337 if (!rp)
338 return -ENOMEM;
340 rp->num_commands = cpu_to_le16(num_commands);
341 rp->num_events = cpu_to_le16(num_events);
/* Commands first; opcode keeps advancing into the events region. */
343 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
344 put_unaligned_le16(mgmt_commands[i], opcode);
346 for (i = 0; i < num_events; i++, opcode++)
347 put_unaligned_le16(mgmt_events[i], opcode);
349 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
350 rp_size);
351 kfree(rp);
353 return err;
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indices of all
 * configured BR/EDR controllers.  Two passes are made under
 * hci_dev_list_lock: one to size the reply, one to fill it.  The fill
 * pass applies extra filters, so the final count can be smaller than
 * the allocation; num_controllers and rp_len are recomputed after it.
 */
356 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
357 u16 data_len)
359 struct mgmt_rp_read_index_list *rp;
360 struct hci_dev *d;
361 size_t rp_len;
362 u16 count;
363 int err;
365 BT_DBG("sock %p", sk);
367 read_lock(&hci_dev_list_lock);
369 count = 0;
370 list_for_each_entry(d, &hci_dev_list, list) {
371 if (d->dev_type == HCI_BREDR &&
372 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
373 count++;
376 rp_len = sizeof(*rp) + (2 * count);
377 rp = kmalloc(rp_len, GFP_ATOMIC); /* atomic: hci_dev_list_lock held */
378 if (!rp) {
379 read_unlock(&hci_dev_list_lock);
380 return -ENOMEM;
383 count = 0;
384 list_for_each_entry(d, &hci_dev_list, list) {
385 if (test_bit(HCI_SETUP, &d->dev_flags) ||
386 test_bit(HCI_CONFIG, &d->dev_flags) ||
387 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
388 continue;
390 /* Devices marked as raw-only are neither configured
391 * nor unconfigured controllers.
393 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
394 continue;
396 if (d->dev_type == HCI_BREDR &&
397 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
398 rp->index[count++] = cpu_to_le16(d->id);
399 BT_DBG("Added hci%u", d->id);
403 rp->num_controllers = cpu_to_le16(count);
404 rp_len = sizeof(*rp) + (2 * count);
406 read_unlock(&hci_dev_list_lock);
408 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
409 rp_len);
411 kfree(rp);
413 return err;
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list(), but selecting BR/EDR controllers that still have
 * the HCI_UNCONFIGURED flag set.
 */
416 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
417 void *data, u16 data_len)
419 struct mgmt_rp_read_unconf_index_list *rp;
420 struct hci_dev *d;
421 size_t rp_len;
422 u16 count;
423 int err;
425 BT_DBG("sock %p", sk);
427 read_lock(&hci_dev_list_lock);
429 count = 0;
430 list_for_each_entry(d, &hci_dev_list, list) {
431 if (d->dev_type == HCI_BREDR &&
432 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
433 count++;
436 rp_len = sizeof(*rp) + (2 * count);
437 rp = kmalloc(rp_len, GFP_ATOMIC); /* atomic: hci_dev_list_lock held */
438 if (!rp) {
439 read_unlock(&hci_dev_list_lock);
440 return -ENOMEM;
443 count = 0;
444 list_for_each_entry(d, &hci_dev_list, list) {
445 if (test_bit(HCI_SETUP, &d->dev_flags) ||
446 test_bit(HCI_CONFIG, &d->dev_flags) ||
447 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
448 continue;
450 /* Devices marked as raw-only are neither configured
451 * nor unconfigured controllers.
453 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
454 continue;
456 if (d->dev_type == HCI_BREDR &&
457 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
458 rp->index[count++] = cpu_to_le16(d->id);
459 BT_DBG("Added hci%u", d->id);
463 rp->num_controllers = cpu_to_le16(count);
464 rp_len = sizeof(*rp) + (2 * count);
466 read_unlock(&hci_dev_list_lock);
468 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
469 0, rp, rp_len);
471 kfree(rp);
473 return err;
476 static bool is_configured(struct hci_dev *hdev)
478 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
479 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
480 return false;
482 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
483 !bacmp(&hdev->public_addr, BDADDR_ANY))
484 return false;
486 return true;
489 static __le32 get_missing_options(struct hci_dev *hdev)
491 u32 options = 0;
493 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
494 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
495 options |= MGMT_OPTION_EXTERNAL_CONFIG;
497 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
498 !bacmp(&hdev->public_addr, BDADDR_ANY))
499 options |= MGMT_OPTION_PUBLIC_ADDRESS;
501 return cpu_to_le32(options);
504 static int new_options(struct hci_dev *hdev, struct sock *skip)
506 __le32 options = get_missing_options(hdev);
508 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
509 sizeof(options), skip);
512 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
514 __le32 options = get_missing_options(hdev);
516 return cmd_complete(sk, hdev->id, opcode, 0, &options,
517 sizeof(options));
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id
 * plus the supported and still-missing configuration options
 * (external config and/or settable public address).
 */
520 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
521 void *data, u16 data_len)
523 struct mgmt_rp_read_config_info rp;
524 u32 options = 0;
526 BT_DBG("sock %p %s", sk, hdev->name);
528 hci_dev_lock(hdev);
530 memset(&rp, 0, sizeof(rp));
531 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
533 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
534 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address is only settable when the driver provides a hook */
536 if (hdev->set_bdaddr)
537 options |= MGMT_OPTION_PUBLIC_ADDRESS;
539 rp.supported_options = cpu_to_le32(options);
540 rp.missing_options = get_missing_options(hdev);
542 hci_dev_unlock(hdev);
544 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
545 sizeof(rp));
/* Compute the MGMT_SETTING_* bits this controller is capable of,
 * derived from its BR/EDR, SSP, SC and LE feature support.
 */
548 static u32 get_supported_settings(struct hci_dev *hdev)
550 u32 settings = 0;
/* Always-available settings, independent of controller features */
552 settings |= MGMT_SETTING_POWERED;
553 settings |= MGMT_SETTING_BONDABLE;
554 settings |= MGMT_SETTING_DEBUG_KEYS;
555 settings |= MGMT_SETTING_CONNECTABLE;
556 settings |= MGMT_SETTING_DISCOVERABLE;
558 if (lmp_bredr_capable(hdev)) {
559 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
560 settings |= MGMT_SETTING_FAST_CONNECTABLE;
561 settings |= MGMT_SETTING_BREDR;
562 settings |= MGMT_SETTING_LINK_SECURITY;
564 if (lmp_ssp_capable(hdev)) {
565 settings |= MGMT_SETTING_SSP;
566 settings |= MGMT_SETTING_HS;
/* Secure Connections: real support or the debug force flag */
569 if (lmp_sc_capable(hdev) ||
570 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
571 settings |= MGMT_SETTING_SECURE_CONN;
574 if (lmp_le_capable(hdev)) {
575 settings |= MGMT_SETTING_LE;
576 settings |= MGMT_SETTING_ADVERTISING;
577 settings |= MGMT_SETTING_PRIVACY;
580 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
581 hdev->set_bdaddr)
582 settings |= MGMT_SETTING_CONFIGURATION;
584 return settings;
/* Translate the controller's current state (power and dev_flags) into
 * the corresponding MGMT_SETTING_* bits.
 */
587 static u32 get_current_settings(struct hci_dev *hdev)
589 u32 settings = 0;
591 if (hdev_is_powered(hdev))
592 settings |= MGMT_SETTING_POWERED;
594 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
595 settings |= MGMT_SETTING_CONNECTABLE;
597 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_FAST_CONNECTABLE;
600 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_DISCOVERABLE;
603 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_BONDABLE;
606 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
607 settings |= MGMT_SETTING_BREDR;
609 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_LE;
612 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LINK_SECURITY;
615 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
616 settings |= MGMT_SETTING_SSP;
618 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_HS;
621 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
622 settings |= MGMT_SETTING_ADVERTISING;
624 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
625 settings |= MGMT_SETTING_SECURE_CONN;
627 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
628 settings |= MGMT_SETTING_DEBUG_KEYS;
630 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
631 settings |= MGMT_SETTING_PRIVACY;
633 return settings;
636 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field containing the registered 16-bit service UUIDs
 * to @data, writing at most @len bytes.  Returns a pointer just past
 * the written data.  When space runs out, the field type is changed
 * from "all" to "some" and the list is truncated.
 */
638 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
640 u8 *ptr = data, *uuids_start = NULL;
641 struct bt_uuid *uuid;
/* Minimum: 2-byte field header plus one 2-byte UUID */
643 if (len < 4)
644 return ptr;
646 list_for_each_entry(uuid, &hdev->uuids, list) {
647 u16 uuid16;
649 if (uuid->size != 16)
650 continue;
/* 16-bit alias lives at bytes 12-13 of the 128-bit form */
652 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
653 if (uuid16 < 0x1100)
654 continue;
656 if (uuid16 == PNP_INFO_SVCLASS_ID)
657 continue;
/* Lazily open the field on the first matching UUID */
659 if (!uuids_start) {
660 uuids_start = ptr;
661 uuids_start[0] = 1;
662 uuids_start[1] = EIR_UUID16_ALL;
663 ptr += 2;
666 /* Stop if not enough space to put next UUID */
667 if ((ptr - data) + sizeof(u16) > len) {
668 uuids_start[1] = EIR_UUID16_SOME;
669 break;
672 *ptr++ = (uuid16 & 0x00ff);
673 *ptr++ = (uuid16 & 0xff00) >> 8;
674 uuids_start[0] += sizeof(uuid16);
677 return ptr;
/* Append an EIR field containing the registered 32-bit service UUIDs
 * to @data (at most @len bytes); same contract and truncation
 * behaviour as create_uuid16_list().
 */
680 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
682 u8 *ptr = data, *uuids_start = NULL;
683 struct bt_uuid *uuid;
/* Minimum: 2-byte field header plus one 4-byte UUID */
685 if (len < 6)
686 return ptr;
688 list_for_each_entry(uuid, &hdev->uuids, list) {
689 if (uuid->size != 32)
690 continue;
692 if (!uuids_start) {
693 uuids_start = ptr;
694 uuids_start[0] = 1;
695 uuids_start[1] = EIR_UUID32_ALL;
696 ptr += 2;
699 /* Stop if not enough space to put next UUID */
700 if ((ptr - data) + sizeof(u32) > len) {
701 uuids_start[1] = EIR_UUID32_SOME;
702 break;
/* 32-bit alias occupies bytes 12-15 of the 128-bit form */
705 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
706 ptr += sizeof(u32);
707 uuids_start[0] += sizeof(u32);
710 return ptr;
/* Append an EIR field containing the registered 128-bit service UUIDs
 * to @data (at most @len bytes); same contract and truncation
 * behaviour as create_uuid16_list().
 */
713 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
715 u8 *ptr = data, *uuids_start = NULL;
716 struct bt_uuid *uuid;
/* Minimum: 2-byte field header plus one 16-byte UUID */
718 if (len < 18)
719 return ptr;
721 list_for_each_entry(uuid, &hdev->uuids, list) {
722 if (uuid->size != 128)
723 continue;
725 if (!uuids_start) {
726 uuids_start = ptr;
727 uuids_start[0] = 1;
728 uuids_start[1] = EIR_UUID128_ALL;
729 ptr += 2;
732 /* Stop if not enough space to put next UUID */
733 if ((ptr - data) + 16 > len) {
734 uuids_start[1] = EIR_UUID128_SOME;
735 break;
738 memcpy(ptr, uuid->uuid, 16);
739 ptr += 16;
740 uuids_start[0] += 16;
743 return ptr;
746 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
748 struct pending_cmd *cmd;
750 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
751 if (cmd->opcode == opcode)
752 return cmd;
755 return NULL;
758 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
759 struct hci_dev *hdev,
760 const void *data)
762 struct pending_cmd *cmd;
764 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
765 if (cmd->user_data != data)
766 continue;
767 if (cmd->opcode == opcode)
768 return cmd;
771 return NULL;
/* Write the local device name into @ptr as an EIR Local Name field
 * (Complete, or Shortened when truncated to fit the advertising data
 * limit).  Returns the number of bytes written.
 */
774 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
776 u8 ad_len = 0;
777 size_t name_len;
779 name_len = strlen(hdev->dev_name);
780 if (name_len > 0) {
/* 2 bytes reserved for the field's length and type header */
781 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
783 if (name_len > max_len) {
784 name_len = max_len;
785 ptr[1] = EIR_NAME_SHORT;
786 } else
787 ptr[1] = EIR_NAME_COMPLETE;
/* Field length counts the type byte plus the name itself */
789 ptr[0] = name_len + 1;
791 memcpy(ptr + 2, hdev->dev_name, name_len);
793 ad_len += (name_len + 2);
794 ptr += (name_len + 2);
797 return ad_len;
/* Queue an LE Set Scan Response Data command if LE is enabled and the
 * data actually changed; the cached copy in hdev is refreshed first.
 */
800 static void update_scan_rsp_data(struct hci_request *req)
802 struct hci_dev *hdev = req->hdev;
803 struct hci_cp_le_set_scan_rsp_data cp;
804 u8 len;
806 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
807 return;
809 memset(&cp, 0, sizeof(cp));
811 len = create_scan_rsp_data(hdev, cp.data);
/* Skip the command when the controller already has this data */
813 if (hdev->scan_rsp_data_len == len &&
814 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
815 return;
817 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
818 hdev->scan_rsp_data_len = len;
820 cp.length = len;
822 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Return the LE advertising discovery flag (LE_AD_GENERAL or
 * LE_AD_LIMITED, or 0 for none) matching the discoverable state.  A
 * pending Set Discoverable command takes precedence over dev_flags.
 */
825 static u8 get_adv_discov_flags(struct hci_dev *hdev)
827 struct pending_cmd *cmd;
829 /* If there's a pending mgmt command the flags will not yet have
830 * their final values, so check for this first.
832 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
833 if (cmd) {
834 struct mgmt_mode *cp = cmd->param;
/* 0x01 = general discoverable, 0x02 = limited discoverable */
835 if (cp->val == 0x01)
836 return LE_AD_GENERAL;
837 else if (cp->val == 0x02)
838 return LE_AD_LIMITED;
839 } else {
840 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
841 return LE_AD_LIMITED;
842 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
843 return LE_AD_GENERAL;
846 return 0;
/* Build the LE advertising data into @ptr: an optional Flags field
 * (discovery mode, BR/EDR-not-supported) and an optional TX Power
 * field.  Returns the number of bytes written.
 */
849 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
851 u8 ad_len = 0, flags = 0;
853 flags |= get_adv_discov_flags(hdev);
855 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
856 flags |= LE_AD_NO_BREDR;
/* Only emit a Flags field when at least one flag is set */
858 if (flags) {
859 BT_DBG("adv flags 0x%02x", flags);
861 ptr[0] = 2;
862 ptr[1] = EIR_FLAGS;
863 ptr[2] = flags;
865 ad_len += 3;
866 ptr += 3;
869 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
870 ptr[0] = 2;
871 ptr[1] = EIR_TX_POWER;
872 ptr[2] = (u8) hdev->adv_tx_power;
874 ad_len += 3;
875 ptr += 3;
878 return ad_len;
/* Queue an LE Set Advertising Data command if LE is enabled and the
 * data actually changed; the cached copy in hdev is refreshed first.
 */
881 static void update_adv_data(struct hci_request *req)
883 struct hci_dev *hdev = req->hdev;
884 struct hci_cp_le_set_adv_data cp;
885 u8 len;
887 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
888 return;
890 memset(&cp, 0, sizeof(cp));
892 len = create_adv_data(hdev, cp.data);
/* Skip the command when the controller already has this data */
894 if (hdev->adv_data_len == len &&
895 memcmp(cp.data, hdev->adv_data, len) == 0)
896 return;
898 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
899 hdev->adv_data_len = len;
901 cp.length = len;
903 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Push the current advertising data to the controller right away as a
 * standalone HCI request.
 */
906 int mgmt_update_adv_data(struct hci_dev *hdev)
908 struct hci_request req;
910 hci_req_init(&req, hdev);
911 update_adv_data(&req);
913 return hci_req_run(&req, NULL);
/* Build the extended inquiry response into @data: local name, inquiry
 * TX power, Device ID record and the 16/32/128-bit service UUID
 * lists.  The caller is expected to pass a zeroed buffer of
 * HCI_MAX_EIR_LENGTH bytes (see update_eir()).
 */
916 static void create_eir(struct hci_dev *hdev, u8 *data)
918 u8 *ptr = data;
919 size_t name_len;
921 name_len = strlen(hdev->dev_name);
923 if (name_len > 0) {
924 /* EIR Data type */
925 if (name_len > 48) {
926 name_len = 48;
927 ptr[1] = EIR_NAME_SHORT;
928 } else
929 ptr[1] = EIR_NAME_COMPLETE;
931 /* EIR Data length */
932 ptr[0] = name_len + 1;
934 memcpy(ptr + 2, hdev->dev_name, name_len);
936 ptr += (name_len + 2);
939 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
940 ptr[0] = 2;
941 ptr[1] = EIR_TX_POWER;
942 ptr[2] = (u8) hdev->inq_tx_power;
944 ptr += 3;
/* Device ID record: source, vendor, product, version */
947 if (hdev->devid_source > 0) {
948 ptr[0] = 9;
949 ptr[1] = EIR_DEVICE_ID;
951 put_unaligned_le16(hdev->devid_source, ptr + 2);
952 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
953 put_unaligned_le16(hdev->devid_product, ptr + 6);
954 put_unaligned_le16(hdev->devid_version, ptr + 8);
956 ptr += 10;
959 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
960 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
961 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue a Write Extended Inquiry Response command, but only if the
 * controller is powered, EIR-capable, has SSP enabled, the service
 * cache is not active, and the EIR data actually changed.
 */
964 static void update_eir(struct hci_request *req)
966 struct hci_dev *hdev = req->hdev;
967 struct hci_cp_write_eir cp;
969 if (!hdev_is_powered(hdev))
970 return;
972 if (!lmp_ext_inq_capable(hdev))
973 return;
975 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
976 return;
978 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
979 return;
981 memset(&cp, 0, sizeof(cp));
983 create_eir(hdev, cp.data);
/* Skip the command when the controller already has this data */
985 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
986 return;
988 memcpy(hdev->eir, cp.data, sizeof(cp.data));
990 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
993 static u8 get_service_classes(struct hci_dev *hdev)
995 struct bt_uuid *uuid;
996 u8 val = 0;
998 list_for_each_entry(uuid, &hdev->uuids, list)
999 val |= uuid->svc_hint;
1001 return val;
/* Queue a Write Class of Device command reflecting the current
 * minor/major class and service classes.  Limited discoverable mode
 * sets bit 0x20 in the major class octet.  Skipped while unpowered,
 * BR/EDR disabled, the service cache is active, or nothing changed.
 */
1004 static void update_class(struct hci_request *req)
1006 struct hci_dev *hdev = req->hdev;
1007 u8 cod[3];
1009 BT_DBG("%s", hdev->name);
1011 if (!hdev_is_powered(hdev))
1012 return;
1014 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1015 return;
1017 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1018 return;
1020 cod[0] = hdev->minor_class;
1021 cod[1] = hdev->major_class;
1022 cod[2] = get_service_classes(hdev);
1024 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1025 cod[1] |= 0x20;
1027 if (memcmp(cod, hdev->dev_class, 3) == 0)
1028 return;
1030 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1033 static bool get_connectable(struct hci_dev *hdev)
1035 struct pending_cmd *cmd;
1037 /* If there's a pending mgmt command the flag will not yet have
1038 * it's final value, so check for this first.
1040 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1041 if (cmd) {
1042 struct mgmt_mode *cp = cmd->param;
1043 return cp->val;
1046 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1049 static void disable_advertising(struct hci_request *req)
1051 u8 enable = 0x00;
1053 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the commands needed to (re)enable LE advertising with the
 * current parameters and address type.  Does nothing while LE
 * connections exist; any running advertising instance is disabled
 * first so the parameters can be rewritten.
 */
1056 static void enable_advertising(struct hci_request *req)
1058 struct hci_dev *hdev = req->hdev;
1059 struct hci_cp_le_set_adv_param cp;
1060 u8 own_addr_type, enable = 0x01;
1061 bool connectable;
1063 if (hci_conn_num(hdev, LE_LINK) > 0)
1064 return;
1066 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1067 disable_advertising(req);
1069 /* Clear the HCI_LE_ADV bit temporarily so that the
1070 * hci_update_random_address knows that it's safe to go ahead
1071 * and write a new random address. The flag will be set back on
1072 * as soon as the SET_ADV_ENABLE HCI command completes.
1074 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1076 connectable = get_connectable(hdev);
1078 /* Set require_privacy to true only when non-connectable
1079 * advertising is used. In that case it is fine to use a
1080 * non-resolvable private address.
1082 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1083 return;
1085 memset(&cp, 0, sizeof(cp));
1086 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1087 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1088 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1089 cp.own_address_type = own_addr_type;
1090 cp.channel_map = hdev->le_adv_channel_map;
1092 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1094 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work handler: when the service cache period expires, push
 * the accumulated EIR and class-of-device updates to the controller.
 */
1097 static void service_cache_off(struct work_struct *work)
1099 struct hci_dev *hdev = container_of(work, struct hci_dev,
1100 service_cache.work);
1101 struct hci_request req;
/* Bail out if the cache flag was already cleared elsewhere */
1103 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1104 return;
1106 hci_req_init(&req, hdev);
1108 hci_dev_lock(hdev);
1110 update_eir(&req);
1111 update_class(&req);
1113 hci_dev_unlock(hdev);
1115 hci_req_run(&req, NULL);
/* Delayed work handler: mark the resolvable private address as
 * expired and, if advertising is on, restart it so a fresh RPA gets
 * programmed into the controller.
 */
1118 static void rpa_expired(struct work_struct *work)
1120 struct hci_dev *hdev = container_of(work, struct hci_dev,
1121 rpa_expired.work);
1122 struct hci_request req;
1124 BT_DBG("");
1126 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1128 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1129 return;
1131 /* The generation of a new RPA and programming it into the
1132 * controller happens in the enable_advertising() function.
1134 hci_req_init(&req, hdev);
1135 enable_advertising(&req);
1136 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialisation; the HCI_MGMT bit
 * guards against running this more than once.
 */
1139 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1141 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1142 return;
1144 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1145 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1147 /* Non-mgmt controlled devices get this bit set
1148 * implicitly so that pairing works for them, however
1149 * for mgmt we require user-space to explicitly enable
1150 * it
1152 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and names.
 */
1155 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1156 void *data, u16 data_len)
1158 struct mgmt_rp_read_info rp;
1160 BT_DBG("sock %p %s", sk, hdev->name);
1162 hci_dev_lock(hdev);
1164 memset(&rp, 0, sizeof(rp));
1166 bacpy(&rp.bdaddr, &hdev->bdaddr);
1168 rp.version = hdev->hci_ver;
1169 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1171 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1172 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1174 memcpy(rp.dev_class, hdev->dev_class, 3);
1176 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1177 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1179 hci_dev_unlock(hdev);
1181 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1182 sizeof(rp));
1185 static void mgmt_pending_free(struct pending_cmd *cmd)
1187 sock_put(cmd->sk);
1188 kfree(cmd->param);
1189 kfree(cmd);
/* Allocate a pending command entry for @opcode, copy the @len-byte
 * parameter block, take a reference on @sk and queue the entry on
 * hdev->mgmt_pending.  Returns NULL on allocation failure.
 */
1192 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1193 struct hci_dev *hdev, void *data,
1194 u16 len)
1196 struct pending_cmd *cmd;
1198 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1199 if (!cmd)
1200 return NULL;
1202 cmd->opcode = opcode;
1203 cmd->index = hdev->id;
1205 cmd->param = kmalloc(len, GFP_KERNEL);
1206 if (!cmd->param) {
1207 kfree(cmd);
1208 return NULL;
1211 if (data)
1212 memcpy(cmd->param, data, len);
/* Hold the socket until mgmt_pending_free() drops it */
1214 cmd->sk = sk;
1215 sock_hold(sk);
1217 list_add(&cmd->list, &hdev->mgmt_pending);
1219 return cmd;
/* Invoke @cb on every pending command matching @opcode (0 matches
 * all).  Uses the _safe iterator so @cb may remove the entry it is
 * given.
 */
1222 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1223 void (*cb)(struct pending_cmd *cmd,
1224 void *data),
1225 void *data)
1227 struct pending_cmd *cmd, *tmp;
1229 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1230 if (opcode > 0 && cmd->opcode != opcode)
1231 continue;
1233 cb(cmd, data);
/* Unlink a pending command from hdev->mgmt_pending and free it. */
1237 static void mgmt_pending_remove(struct pending_cmd *cmd)
1239 list_del(&cmd->list);
1240 mgmt_pending_free(cmd);
1243 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1245 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1247 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1248 sizeof(settings));
/* HCI request completion callback for clean_up_hci_state(): once no
 * connections remain, cancel the delayed power-off and run it now.
 */
1251 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1253 BT_DBG("%s status 0x%02x", hdev->name, status);
1255 if (hci_conn_count(hdev) == 0) {
1256 cancel_delayed_work(&hdev->power_off);
1257 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1261 static bool hci_stop_discovery(struct hci_request *req)
1263 struct hci_dev *hdev = req->hdev;
1264 struct hci_cp_remote_name_req_cancel cp;
1265 struct inquiry_entry *e;
1267 switch (hdev->discovery.state) {
1268 case DISCOVERY_FINDING:
1269 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1270 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1271 } else {
1272 cancel_delayed_work(&hdev->le_scan_disable);
1273 hci_req_add_le_scan_disable(req);
1276 return true;
1278 case DISCOVERY_RESOLVING:
1279 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1280 NAME_PENDING);
1281 if (!e)
1282 break;
1284 bacpy(&cp.bdaddr, &e->data.bdaddr);
1285 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1286 &cp);
1288 return true;
1290 default:
1291 /* Passive scanning */
1292 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1293 hci_req_add_le_scan_disable(req);
1294 return true;
1297 break;
1300 return false;
1303 static int clean_up_hci_state(struct hci_dev *hdev)
1305 struct hci_request req;
1306 struct hci_conn *conn;
1307 bool discov_stopped;
1308 int err;
1310 hci_req_init(&req, hdev);
1312 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1313 test_bit(HCI_PSCAN, &hdev->flags)) {
1314 u8 scan = 0x00;
1315 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1318 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1319 disable_advertising(&req);
1321 discov_stopped = hci_stop_discovery(&req);
1323 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1324 struct hci_cp_disconnect dc;
1325 struct hci_cp_reject_conn_req rej;
1327 switch (conn->state) {
1328 case BT_CONNECTED:
1329 case BT_CONFIG:
1330 dc.handle = cpu_to_le16(conn->handle);
1331 dc.reason = 0x15; /* Terminated due to Power Off */
1332 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1333 break;
1334 case BT_CONNECT:
1335 if (conn->type == LE_LINK)
1336 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1337 0, NULL);
1338 else if (conn->type == ACL_LINK)
1339 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1340 6, &conn->dst);
1341 break;
1342 case BT_CONNECT2:
1343 bacpy(&rej.bdaddr, &conn->dst);
1344 rej.reason = 0x15; /* Terminated due to Power Off */
1345 if (conn->type == ACL_LINK)
1346 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1347 sizeof(rej), &rej);
1348 else if (conn->type == SCO_LINK)
1349 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1350 sizeof(rej), &rej);
1351 break;
1355 err = hci_req_run(&req, clean_up_hci_complete);
1356 if (!err && discov_stopped)
1357 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1359 return err;
1362 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1363 u16 len)
1365 struct mgmt_mode *cp = data;
1366 struct pending_cmd *cmd;
1367 int err;
1369 BT_DBG("request for %s", hdev->name);
1371 if (cp->val != 0x00 && cp->val != 0x01)
1372 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1373 MGMT_STATUS_INVALID_PARAMS);
1375 hci_dev_lock(hdev);
1377 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1378 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1379 MGMT_STATUS_BUSY);
1380 goto failed;
1383 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1384 cancel_delayed_work(&hdev->power_off);
1386 if (cp->val) {
1387 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1388 data, len);
1389 err = mgmt_powered(hdev, 1);
1390 goto failed;
1394 if (!!cp->val == hdev_is_powered(hdev)) {
1395 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1396 goto failed;
1399 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1400 if (!cmd) {
1401 err = -ENOMEM;
1402 goto failed;
1405 if (cp->val) {
1406 queue_work(hdev->req_workqueue, &hdev->power_on);
1407 err = 0;
1408 } else {
1409 /* Disconnect connections, stop scans, etc */
1410 err = clean_up_hci_state(hdev);
1411 if (!err)
1412 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1413 HCI_POWER_OFF_TIMEOUT);
1415 /* ENODATA means there were no HCI commands queued */
1416 if (err == -ENODATA) {
1417 cancel_delayed_work(&hdev->power_off);
1418 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1419 err = 0;
1423 failed:
1424 hci_dev_unlock(hdev);
1425 return err;
1428 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1430 __le32 ev;
1432 ev = cpu_to_le32(get_current_settings(hdev));
1434 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1437 int mgmt_new_settings(struct hci_dev *hdev)
1439 return new_settings(hdev, NULL);
1442 struct cmd_lookup {
1443 struct sock *sk;
1444 struct hci_dev *hdev;
1445 u8 mgmt_status;
1448 static void settings_rsp(struct pending_cmd *cmd, void *data)
1450 struct cmd_lookup *match = data;
1452 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1454 list_del(&cmd->list);
1456 if (match->sk == NULL) {
1457 match->sk = cmd->sk;
1458 sock_hold(match->sk);
1461 mgmt_pending_free(cmd);
1464 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1466 u8 *status = data;
1468 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1469 mgmt_pending_remove(cmd);
1472 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1474 if (!lmp_bredr_capable(hdev))
1475 return MGMT_STATUS_NOT_SUPPORTED;
1476 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1477 return MGMT_STATUS_REJECTED;
1478 else
1479 return MGMT_STATUS_SUCCESS;
1482 static u8 mgmt_le_support(struct hci_dev *hdev)
1484 if (!lmp_le_capable(hdev))
1485 return MGMT_STATUS_NOT_SUPPORTED;
1486 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1487 return MGMT_STATUS_REJECTED;
1488 else
1489 return MGMT_STATUS_SUCCESS;
1492 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1494 struct pending_cmd *cmd;
1495 struct mgmt_mode *cp;
1496 struct hci_request req;
1497 bool changed;
1499 BT_DBG("status 0x%02x", status);
1501 hci_dev_lock(hdev);
1503 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1504 if (!cmd)
1505 goto unlock;
1507 if (status) {
1508 u8 mgmt_err = mgmt_status(status);
1509 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1510 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1511 goto remove_cmd;
1514 cp = cmd->param;
1515 if (cp->val) {
1516 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1517 &hdev->dev_flags);
1519 if (hdev->discov_timeout > 0) {
1520 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1521 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1522 to);
1524 } else {
1525 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1526 &hdev->dev_flags);
1529 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1531 if (changed)
1532 new_settings(hdev, cmd->sk);
1534 /* When the discoverable mode gets changed, make sure
1535 * that class of device has the limited discoverable
1536 * bit correctly set. Also update page scan based on whitelist
1537 * entries.
1539 hci_req_init(&req, hdev);
1540 hci_update_page_scan(hdev, &req);
1541 update_class(&req);
1542 hci_req_run(&req, NULL);
1544 remove_cmd:
1545 mgmt_pending_remove(cmd);
1547 unlock:
1548 hci_dev_unlock(hdev);
1551 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1552 u16 len)
1554 struct mgmt_cp_set_discoverable *cp = data;
1555 struct pending_cmd *cmd;
1556 struct hci_request req;
1557 u16 timeout;
1558 u8 scan;
1559 int err;
1561 BT_DBG("request for %s", hdev->name);
1563 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1564 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1565 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_REJECTED);
1568 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_INVALID_PARAMS);
1572 timeout = __le16_to_cpu(cp->timeout);
1574 /* Disabling discoverable requires that no timeout is set,
1575 * and enabling limited discoverable requires a timeout.
1577 if ((cp->val == 0x00 && timeout > 0) ||
1578 (cp->val == 0x02 && timeout == 0))
1579 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_INVALID_PARAMS);
1582 hci_dev_lock(hdev);
1584 if (!hdev_is_powered(hdev) && timeout > 0) {
1585 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_NOT_POWERED);
1587 goto failed;
1590 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 MGMT_STATUS_BUSY);
1594 goto failed;
1597 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1598 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1600 goto failed;
1603 if (!hdev_is_powered(hdev)) {
1604 bool changed = false;
1606 /* Setting limited discoverable when powered off is
1607 * not a valid operation since it requires a timeout
1608 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1610 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1611 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1612 changed = true;
1615 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1616 if (err < 0)
1617 goto failed;
1619 if (changed)
1620 err = new_settings(hdev, sk);
1622 goto failed;
1625 /* If the current mode is the same, then just update the timeout
1626 * value with the new value. And if only the timeout gets updated,
1627 * then no need for any HCI transactions.
1629 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1630 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1631 &hdev->dev_flags)) {
1632 cancel_delayed_work(&hdev->discov_off);
1633 hdev->discov_timeout = timeout;
1635 if (cp->val && hdev->discov_timeout > 0) {
1636 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1637 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1638 to);
1641 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1642 goto failed;
1645 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1646 if (!cmd) {
1647 err = -ENOMEM;
1648 goto failed;
1651 /* Cancel any potential discoverable timeout that might be
1652 * still active and store new timeout value. The arming of
1653 * the timeout happens in the complete handler.
1655 cancel_delayed_work(&hdev->discov_off);
1656 hdev->discov_timeout = timeout;
1658 /* Limited discoverable mode */
1659 if (cp->val == 0x02)
1660 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1661 else
1662 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1664 hci_req_init(&req, hdev);
1666 /* The procedure for LE-only controllers is much simpler - just
1667 * update the advertising data.
1669 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1670 goto update_ad;
1672 scan = SCAN_PAGE;
1674 if (cp->val) {
1675 struct hci_cp_write_current_iac_lap hci_cp;
1677 if (cp->val == 0x02) {
1678 /* Limited discoverable mode */
1679 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1680 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1681 hci_cp.iac_lap[1] = 0x8b;
1682 hci_cp.iac_lap[2] = 0x9e;
1683 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1684 hci_cp.iac_lap[4] = 0x8b;
1685 hci_cp.iac_lap[5] = 0x9e;
1686 } else {
1687 /* General discoverable mode */
1688 hci_cp.num_iac = 1;
1689 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1690 hci_cp.iac_lap[1] = 0x8b;
1691 hci_cp.iac_lap[2] = 0x9e;
1694 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1695 (hci_cp.num_iac * 3) + 1, &hci_cp);
1697 scan |= SCAN_INQUIRY;
1698 } else {
1699 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1702 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1704 update_ad:
1705 update_adv_data(&req);
1707 err = hci_req_run(&req, set_discoverable_complete);
1708 if (err < 0)
1709 mgmt_pending_remove(cmd);
1711 failed:
1712 hci_dev_unlock(hdev);
1713 return err;
1716 static void write_fast_connectable(struct hci_request *req, bool enable)
1718 struct hci_dev *hdev = req->hdev;
1719 struct hci_cp_write_page_scan_activity acp;
1720 u8 type;
1722 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1723 return;
1725 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1726 return;
1728 if (enable) {
1729 type = PAGE_SCAN_TYPE_INTERLACED;
1731 /* 160 msec page scan interval */
1732 acp.interval = cpu_to_le16(0x0100);
1733 } else {
1734 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1736 /* default 1.28 sec page scan */
1737 acp.interval = cpu_to_le16(0x0800);
1740 acp.window = cpu_to_le16(0x0012);
1742 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1743 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1744 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1745 sizeof(acp), &acp);
1747 if (hdev->page_scan_type != type)
1748 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1751 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1753 struct pending_cmd *cmd;
1754 struct mgmt_mode *cp;
1755 bool conn_changed, discov_changed;
1757 BT_DBG("status 0x%02x", status);
1759 hci_dev_lock(hdev);
1761 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1762 if (!cmd)
1763 goto unlock;
1765 if (status) {
1766 u8 mgmt_err = mgmt_status(status);
1767 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1768 goto remove_cmd;
1771 cp = cmd->param;
1772 if (cp->val) {
1773 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1774 &hdev->dev_flags);
1775 discov_changed = false;
1776 } else {
1777 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1778 &hdev->dev_flags);
1779 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1780 &hdev->dev_flags);
1783 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1785 if (conn_changed || discov_changed) {
1786 new_settings(hdev, cmd->sk);
1787 hci_update_page_scan(hdev, NULL);
1788 if (discov_changed)
1789 mgmt_update_adv_data(hdev);
1790 hci_update_background_scan(hdev);
1793 remove_cmd:
1794 mgmt_pending_remove(cmd);
1796 unlock:
1797 hci_dev_unlock(hdev);
1800 static int set_connectable_update_settings(struct hci_dev *hdev,
1801 struct sock *sk, u8 val)
1803 bool changed = false;
1804 int err;
1806 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1807 changed = true;
1809 if (val) {
1810 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1811 } else {
1812 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1816 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1817 if (err < 0)
1818 return err;
1820 if (changed) {
1821 hci_update_page_scan(hdev, NULL);
1822 hci_update_background_scan(hdev);
1823 return new_settings(hdev, sk);
1826 return 0;
1829 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1830 u16 len)
1832 struct mgmt_mode *cp = data;
1833 struct pending_cmd *cmd;
1834 struct hci_request req;
1835 u8 scan;
1836 int err;
1838 BT_DBG("request for %s", hdev->name);
1840 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1841 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1842 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1843 MGMT_STATUS_REJECTED);
1845 if (cp->val != 0x00 && cp->val != 0x01)
1846 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1847 MGMT_STATUS_INVALID_PARAMS);
1849 hci_dev_lock(hdev);
1851 if (!hdev_is_powered(hdev)) {
1852 err = set_connectable_update_settings(hdev, sk, cp->val);
1853 goto failed;
1856 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1857 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1858 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1859 MGMT_STATUS_BUSY);
1860 goto failed;
1863 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1864 if (!cmd) {
1865 err = -ENOMEM;
1866 goto failed;
1869 hci_req_init(&req, hdev);
1871 /* If BR/EDR is not enabled and we disable advertising as a
1872 * by-product of disabling connectable, we need to update the
1873 * advertising flags.
1875 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1876 if (!cp->val) {
1877 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1878 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1880 update_adv_data(&req);
1881 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1882 if (cp->val) {
1883 scan = SCAN_PAGE;
1884 } else {
1885 /* If we don't have any whitelist entries just
1886 * disable all scanning. If there are entries
1887 * and we had both page and inquiry scanning
1888 * enabled then fall back to only page scanning.
1889 * Otherwise no changes are needed.
1891 if (list_empty(&hdev->whitelist))
1892 scan = SCAN_DISABLED;
1893 else if (test_bit(HCI_ISCAN, &hdev->flags))
1894 scan = SCAN_PAGE;
1895 else
1896 goto no_scan_update;
1898 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1899 hdev->discov_timeout > 0)
1900 cancel_delayed_work(&hdev->discov_off);
1903 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1906 no_scan_update:
1907 /* If we're going from non-connectable to connectable or
1908 * vice-versa when fast connectable is enabled ensure that fast
1909 * connectable gets disabled. write_fast_connectable won't do
1910 * anything if the page scan parameters are already what they
1911 * should be.
1913 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1914 write_fast_connectable(&req, false);
1916 /* Update the advertising parameters if necessary */
1917 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1918 enable_advertising(&req);
1920 err = hci_req_run(&req, set_connectable_complete);
1921 if (err < 0) {
1922 mgmt_pending_remove(cmd);
1923 if (err == -ENODATA)
1924 err = set_connectable_update_settings(hdev, sk,
1925 cp->val);
1926 goto failed;
1929 failed:
1930 hci_dev_unlock(hdev);
1931 return err;
1934 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1935 u16 len)
1937 struct mgmt_mode *cp = data;
1938 bool changed;
1939 int err;
1941 BT_DBG("request for %s", hdev->name);
1943 if (cp->val != 0x00 && cp->val != 0x01)
1944 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1945 MGMT_STATUS_INVALID_PARAMS);
1947 hci_dev_lock(hdev);
1949 if (cp->val)
1950 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1951 else
1952 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1954 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1955 if (err < 0)
1956 goto unlock;
1958 if (changed)
1959 err = new_settings(hdev, sk);
1961 unlock:
1962 hci_dev_unlock(hdev);
1963 return err;
1966 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1967 u16 len)
1969 struct mgmt_mode *cp = data;
1970 struct pending_cmd *cmd;
1971 u8 val, status;
1972 int err;
1974 BT_DBG("request for %s", hdev->name);
1976 status = mgmt_bredr_support(hdev);
1977 if (status)
1978 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1979 status);
1981 if (cp->val != 0x00 && cp->val != 0x01)
1982 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1983 MGMT_STATUS_INVALID_PARAMS);
1985 hci_dev_lock(hdev);
1987 if (!hdev_is_powered(hdev)) {
1988 bool changed = false;
1990 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1991 &hdev->dev_flags)) {
1992 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1993 changed = true;
1996 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1997 if (err < 0)
1998 goto failed;
2000 if (changed)
2001 err = new_settings(hdev, sk);
2003 goto failed;
2006 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2007 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2008 MGMT_STATUS_BUSY);
2009 goto failed;
2012 val = !!cp->val;
2014 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2015 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2016 goto failed;
2019 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2020 if (!cmd) {
2021 err = -ENOMEM;
2022 goto failed;
2025 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2026 if (err < 0) {
2027 mgmt_pending_remove(cmd);
2028 goto failed;
2031 failed:
2032 hci_dev_unlock(hdev);
2033 return err;
2036 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2038 struct mgmt_mode *cp = data;
2039 struct pending_cmd *cmd;
2040 u8 status;
2041 int err;
2043 BT_DBG("request for %s", hdev->name);
2045 status = mgmt_bredr_support(hdev);
2046 if (status)
2047 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2049 if (!lmp_ssp_capable(hdev))
2050 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2051 MGMT_STATUS_NOT_SUPPORTED);
2053 if (cp->val != 0x00 && cp->val != 0x01)
2054 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2055 MGMT_STATUS_INVALID_PARAMS);
2057 hci_dev_lock(hdev);
2059 if (!hdev_is_powered(hdev)) {
2060 bool changed;
2062 if (cp->val) {
2063 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2064 &hdev->dev_flags);
2065 } else {
2066 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2067 &hdev->dev_flags);
2068 if (!changed)
2069 changed = test_and_clear_bit(HCI_HS_ENABLED,
2070 &hdev->dev_flags);
2071 else
2072 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2075 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2076 if (err < 0)
2077 goto failed;
2079 if (changed)
2080 err = new_settings(hdev, sk);
2082 goto failed;
2085 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2086 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2087 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2088 MGMT_STATUS_BUSY);
2089 goto failed;
2092 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2093 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2094 goto failed;
2097 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2098 if (!cmd) {
2099 err = -ENOMEM;
2100 goto failed;
2103 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2104 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2105 sizeof(cp->val), &cp->val);
2107 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2108 if (err < 0) {
2109 mgmt_pending_remove(cmd);
2110 goto failed;
2113 failed:
2114 hci_dev_unlock(hdev);
2115 return err;
2118 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2120 struct mgmt_mode *cp = data;
2121 bool changed;
2122 u8 status;
2123 int err;
2125 BT_DBG("request for %s", hdev->name);
2127 status = mgmt_bredr_support(hdev);
2128 if (status)
2129 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2131 if (!lmp_ssp_capable(hdev))
2132 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2133 MGMT_STATUS_NOT_SUPPORTED);
2135 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2136 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2137 MGMT_STATUS_REJECTED);
2139 if (cp->val != 0x00 && cp->val != 0x01)
2140 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2141 MGMT_STATUS_INVALID_PARAMS);
2143 hci_dev_lock(hdev);
2145 if (cp->val) {
2146 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2147 } else {
2148 if (hdev_is_powered(hdev)) {
2149 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2150 MGMT_STATUS_REJECTED);
2151 goto unlock;
2154 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2157 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2158 if (err < 0)
2159 goto unlock;
2161 if (changed)
2162 err = new_settings(hdev, sk);
2164 unlock:
2165 hci_dev_unlock(hdev);
2166 return err;
2169 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2171 struct cmd_lookup match = { NULL, hdev };
2173 if (status) {
2174 u8 mgmt_err = mgmt_status(status);
2176 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2177 &mgmt_err);
2178 return;
2181 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2183 new_settings(hdev, match.sk);
2185 if (match.sk)
2186 sock_put(match.sk);
2188 /* Make sure the controller has a good default for
2189 * advertising data. Restrict the update to when LE
2190 * has actually been enabled. During power on, the
2191 * update in powered_update_hci will take care of it.
2193 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2194 struct hci_request req;
2196 hci_dev_lock(hdev);
2198 hci_req_init(&req, hdev);
2199 update_adv_data(&req);
2200 update_scan_rsp_data(&req);
2201 hci_req_run(&req, NULL);
2203 hci_update_background_scan(hdev);
2205 hci_dev_unlock(hdev);
2209 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2211 struct mgmt_mode *cp = data;
2212 struct hci_cp_write_le_host_supported hci_cp;
2213 struct pending_cmd *cmd;
2214 struct hci_request req;
2215 int err;
2216 u8 val, enabled;
2218 BT_DBG("request for %s", hdev->name);
2220 if (!lmp_le_capable(hdev))
2221 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2222 MGMT_STATUS_NOT_SUPPORTED);
2224 if (cp->val != 0x00 && cp->val != 0x01)
2225 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2226 MGMT_STATUS_INVALID_PARAMS);
2228 /* LE-only devices do not allow toggling LE on/off */
2229 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2230 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2231 MGMT_STATUS_REJECTED);
2233 hci_dev_lock(hdev);
2235 val = !!cp->val;
2236 enabled = lmp_host_le_capable(hdev);
2238 if (!hdev_is_powered(hdev) || val == enabled) {
2239 bool changed = false;
2241 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2242 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2243 changed = true;
2246 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2247 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2248 changed = true;
2251 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2252 if (err < 0)
2253 goto unlock;
2255 if (changed)
2256 err = new_settings(hdev, sk);
2258 goto unlock;
2261 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2262 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2263 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2264 MGMT_STATUS_BUSY);
2265 goto unlock;
2268 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2269 if (!cmd) {
2270 err = -ENOMEM;
2271 goto unlock;
2274 hci_req_init(&req, hdev);
2276 memset(&hci_cp, 0, sizeof(hci_cp));
2278 if (val) {
2279 hci_cp.le = val;
2280 hci_cp.simul = 0x00;
2281 } else {
2282 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2283 disable_advertising(&req);
2286 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2287 &hci_cp);
2289 err = hci_req_run(&req, le_enable_complete);
2290 if (err < 0)
2291 mgmt_pending_remove(cmd);
2293 unlock:
2294 hci_dev_unlock(hdev);
2295 return err;
2298 /* This is a helper function to test for pending mgmt commands that can
2299 * cause CoD or EIR HCI commands. We can only allow one such pending
2300 * mgmt command at a time since otherwise we cannot easily track what
2301 * the current values are, will be, and based on that calculate if a new
2302 * HCI command needs to be sent and if yes with what value.
2304 static bool pending_eir_or_class(struct hci_dev *hdev)
2306 struct pending_cmd *cmd;
2308 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2309 switch (cmd->opcode) {
2310 case MGMT_OP_ADD_UUID:
2311 case MGMT_OP_REMOVE_UUID:
2312 case MGMT_OP_SET_DEV_CLASS:
2313 case MGMT_OP_SET_POWERED:
2314 return true;
2318 return false;
2321 static const u8 bluetooth_base_uuid[] = {
2322 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2323 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2326 static u8 get_uuid_size(const u8 *uuid)
2328 u32 val;
2330 if (memcmp(uuid, bluetooth_base_uuid, 12))
2331 return 128;
2333 val = get_unaligned_le32(&uuid[12]);
2334 if (val > 0xffff)
2335 return 32;
2337 return 16;
2340 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2342 struct pending_cmd *cmd;
2344 hci_dev_lock(hdev);
2346 cmd = mgmt_pending_find(mgmt_op, hdev);
2347 if (!cmd)
2348 goto unlock;
2350 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2351 hdev->dev_class, 3);
2353 mgmt_pending_remove(cmd);
2355 unlock:
2356 hci_dev_unlock(hdev);
2359 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2361 BT_DBG("status 0x%02x", status);
2363 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2366 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2368 struct mgmt_cp_add_uuid *cp = data;
2369 struct pending_cmd *cmd;
2370 struct hci_request req;
2371 struct bt_uuid *uuid;
2372 int err;
2374 BT_DBG("request for %s", hdev->name);
2376 hci_dev_lock(hdev);
2378 if (pending_eir_or_class(hdev)) {
2379 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2380 MGMT_STATUS_BUSY);
2381 goto failed;
2384 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2385 if (!uuid) {
2386 err = -ENOMEM;
2387 goto failed;
2390 memcpy(uuid->uuid, cp->uuid, 16);
2391 uuid->svc_hint = cp->svc_hint;
2392 uuid->size = get_uuid_size(cp->uuid);
2394 list_add_tail(&uuid->list, &hdev->uuids);
2396 hci_req_init(&req, hdev);
2398 update_class(&req);
2399 update_eir(&req);
2401 err = hci_req_run(&req, add_uuid_complete);
2402 if (err < 0) {
2403 if (err != -ENODATA)
2404 goto failed;
2406 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2407 hdev->dev_class, 3);
2408 goto failed;
2411 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2412 if (!cmd) {
2413 err = -ENOMEM;
2414 goto failed;
2417 err = 0;
2419 failed:
2420 hci_dev_unlock(hdev);
2421 return err;
2424 static bool enable_service_cache(struct hci_dev *hdev)
2426 if (!hdev_is_powered(hdev))
2427 return false;
2429 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2430 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2431 CACHE_TIMEOUT);
2432 return true;
2435 return false;
2438 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2440 BT_DBG("status 0x%02x", status);
2442 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2445 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2446 u16 len)
2448 struct mgmt_cp_remove_uuid *cp = data;
2449 struct pending_cmd *cmd;
2450 struct bt_uuid *match, *tmp;
2451 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2452 struct hci_request req;
2453 int err, found;
2455 BT_DBG("request for %s", hdev->name);
2457 hci_dev_lock(hdev);
2459 if (pending_eir_or_class(hdev)) {
2460 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2461 MGMT_STATUS_BUSY);
2462 goto unlock;
2465 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2466 hci_uuids_clear(hdev);
2468 if (enable_service_cache(hdev)) {
2469 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2470 0, hdev->dev_class, 3);
2471 goto unlock;
2474 goto update_class;
2477 found = 0;
2479 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2480 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2481 continue;
2483 list_del(&match->list);
2484 kfree(match);
2485 found++;
2488 if (found == 0) {
2489 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2490 MGMT_STATUS_INVALID_PARAMS);
2491 goto unlock;
2494 update_class:
2495 hci_req_init(&req, hdev);
2497 update_class(&req);
2498 update_eir(&req);
2500 err = hci_req_run(&req, remove_uuid_complete);
2501 if (err < 0) {
2502 if (err != -ENODATA)
2503 goto unlock;
2505 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2506 hdev->dev_class, 3);
2507 goto unlock;
2510 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2511 if (!cmd) {
2512 err = -ENOMEM;
2513 goto unlock;
2516 err = 0;
2518 unlock:
2519 hci_dev_unlock(hdev);
2520 return err;
2523 static void set_class_complete(struct hci_dev *hdev, u8 status)
2525 BT_DBG("status 0x%02x", status);
2527 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Handle the mgmt Set Device Class command.
 *
 * Validates that reserved bits of minor/major are clear, stores the new
 * class and, if the controller is powered, builds an HCI request to write
 * the class (and EIR, when the service cache was active).  Returns a
 * negative errno or the result of the cmd_status/cmd_complete reply.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just remember the values for later */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while cancelling the cache timer since the
		 * work item itself takes hci_dev_lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA means no HCI commands were needed (e.g. class
		 * already matched) — report success immediately.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the mgmt Load Link Keys command: replace all stored BR/EDR link
 * keys with the supplied list and update the HCI_KEEP_DEBUG_KEYS policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which expected_len still fits in u16 —
	 * guards the length computation below against overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the existing key store is only
	 * cleared once the whole request is known to be well-formed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2683 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2684 u8 addr_type, struct sock *skip_sk)
2686 struct mgmt_ev_device_unpaired ev;
2688 bacpy(&ev.addr.bdaddr, bdaddr);
2689 ev.addr.type = addr_type;
2691 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2692 skip_sk);
/* Handle the mgmt Unpair Device command.
 *
 * Removes stored keys (link key for BR/EDR; IRK, connection params and
 * LTK for LE) and, if requested and a connection exists, disconnects it
 * before completing the command from the disconnect path.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* Only the LTK result decides whether the device counted
		 * as paired; IRK/params removal is best-effort.
		 */
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No connection to tear down: complete immediately and notify
	 * other mgmt sockets about the unpairing.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the mgmt Disconnect command: tear down the ACL or LE link to the
 * given address.  The command completes asynchronously from the disconnect
 * complete path via the pending command added here.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED states mean there is nothing on the air yet */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2847 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2849 switch (link_type) {
2850 case LE_LINK:
2851 switch (addr_type) {
2852 case ADDR_LE_DEV_PUBLIC:
2853 return BDADDR_LE_PUBLIC;
2855 default:
2856 /* Fallback to LE Random address type */
2857 return BDADDR_LE_RANDOM;
2860 default:
2861 /* Fallback to BR/EDR type */
2862 return BDADDR_BREDR;
/* Handle the mgmt Get Connections command: reply with the addresses of
 * all mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidate connections to size the allocation.
	 * This may over-count (SCO/eSCO are filtered below), which only
	 * wastes a few bytes.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in entries.  SCO/eSCO links skip the index
	 * increment, so their copied slot is overwritten by the next entry
	 * or excluded by the recalculated length.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2924 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2925 struct mgmt_cp_pin_code_neg_reply *cp)
2927 struct pending_cmd *cmd;
2928 int err;
2930 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2931 sizeof(*cp));
2932 if (!cmd)
2933 return -ENOMEM;
2935 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2936 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2937 if (err < 0)
2938 mgmt_pending_remove(cmd);
2940 return err;
/* Handle the mgmt PIN Code Reply command.
 *
 * For high-security connections a 16-byte PIN is mandatory; a shorter one
 * is converted into a negative reply towards the controller while the
 * caller gets INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3003 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3004 u16 len)
3006 struct mgmt_cp_set_io_capability *cp = data;
3008 BT_DBG("");
3010 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3011 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3012 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3014 hci_dev_lock(hdev);
3016 hdev->io_capability = cp->io_capability;
3018 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3019 hdev->io_capability);
3021 hci_dev_unlock(hdev);
3023 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3027 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3029 struct hci_dev *hdev = conn->hdev;
3030 struct pending_cmd *cmd;
3032 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3033 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3034 continue;
3036 if (cmd->user_data != conn)
3037 continue;
3039 return cmd;
3042 return NULL;
/* Finish a pending Pair Device command: report the result to userspace,
 * detach the pairing callbacks from the connection, and release the
 * references taken when the command was set up.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);
	/* Balances the hci_conn_get() done when the command was created */
	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
3067 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3069 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3070 struct pending_cmd *cmd;
3072 cmd = find_pairing(conn);
3073 if (cmd)
3074 pairing_complete(cmd, status);
3077 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3079 struct pending_cmd *cmd;
3081 BT_DBG("status %u", status);
3083 cmd = find_pairing(conn);
3084 if (!cmd)
3085 BT_DBG("Unable to find a pending command");
3086 else
3087 pairing_complete(cmd, mgmt_status(status));
3090 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3092 struct pending_cmd *cmd;
3094 BT_DBG("status %u", status);
3096 if (!status)
3097 return;
3099 cmd = find_pairing(conn);
3100 if (!cmd)
3101 BT_DBG("Unable to find a pending command");
3102 else
3103 pairing_complete(cmd, mgmt_status(status));
/* Handle the mgmt Pair Device command: initiate a BR/EDR or LE connection
 * and attach pairing callbacks so completion is reported asynchronously.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing already owns
	 * this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference is released again in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already-connected BR/EDR links that are secure enough complete
	 * immediately without waiting for a callback.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the mgmt Cancel Pair Device command: abort the in-flight Pair
 * Device command for the given address with status CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing was started
	 * with; only the bdaddr is compared here.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Common handler for user pairing responses (PIN/confirm/passkey replies
 * and their negative counterparts).
 *
 * LE responses are delivered to SMP and completed synchronously; BR/EDR
 * responses are forwarded to the controller via hci_op and complete later
 * through the pending command added here.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP rather than HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3335 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3336 void *data, u16 len)
3338 struct mgmt_cp_pin_code_neg_reply *cp = data;
3340 BT_DBG("");
3342 return user_pairing_resp(sk, hdev, &cp->addr,
3343 MGMT_OP_PIN_CODE_NEG_REPLY,
3344 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3347 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3348 u16 len)
3350 struct mgmt_cp_user_confirm_reply *cp = data;
3352 BT_DBG("");
3354 if (len != sizeof(*cp))
3355 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3356 MGMT_STATUS_INVALID_PARAMS);
3358 return user_pairing_resp(sk, hdev, &cp->addr,
3359 MGMT_OP_USER_CONFIRM_REPLY,
3360 HCI_OP_USER_CONFIRM_REPLY, 0);
3363 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3364 void *data, u16 len)
3366 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3368 BT_DBG("");
3370 return user_pairing_resp(sk, hdev, &cp->addr,
3371 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3372 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3375 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3376 u16 len)
3378 struct mgmt_cp_user_passkey_reply *cp = data;
3380 BT_DBG("");
3382 return user_pairing_resp(sk, hdev, &cp->addr,
3383 MGMT_OP_USER_PASSKEY_REPLY,
3384 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3387 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3388 void *data, u16 len)
3390 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3392 BT_DBG("");
3394 return user_pairing_resp(sk, hdev, &cp->addr,
3395 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3396 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3399 static void update_name(struct hci_request *req)
3401 struct hci_dev *hdev = req->hdev;
3402 struct hci_cp_write_local_name cp;
3404 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3406 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: report success or
 * translated failure to the originating socket and drop the pending entry.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the mgmt Set Local Name command: store the short name, and write
 * the full name to the controller (name, EIR, LE scan response) when
 * powered; otherwise just store it and emit a name-changed event.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		/* Tell other mgmt sockets about the change */
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the mgmt Read Local OOB Data command: ask the controller for its
 * OOB pairing data (extended variant when Secure Connections is enabled).
 * The reply is delivered asynchronously via the pending command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB read may be outstanding at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the mgmt Add Remote OOB Data command.  The payload length selects
 * between the legacy (P-192 only) and extended (P-192 + P-256) formats.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither format matched — reject the request */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3602 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3603 void *data, u16 len)
3605 struct mgmt_cp_remove_remote_oob_data *cp = data;
3606 u8 status;
3607 int err;
3609 BT_DBG("%s", hdev->name);
3611 hci_dev_lock(hdev);
3613 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3614 if (err < 0)
3615 status = MGMT_STATUS_INVALID_PARAMS;
3616 else
3617 status = MGMT_STATUS_SUCCESS;
3619 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3620 status, &cp->addr, sizeof(cp->addr));
3622 hci_dev_unlock(hdev);
3623 return err;
3626 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3628 struct pending_cmd *cmd;
3629 u8 type;
3630 int err;
3632 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3634 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3635 if (!cmd)
3636 return -ENOENT;
3638 type = hdev->discovery.type;
3640 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3641 &type, sizeof(type));
3642 mgmt_pending_remove(cmd);
3644 return err;
/* HCI request completion callback for Start Discovery.
 *
 * On success, moves the state machine to FINDING and, for LE-involved
 * discovery types, arms the delayed work that disables LE scanning after
 * the discovery timeout.  BR/EDR inquiry needs no timer here.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* Inquiry terminates by itself; no LE scan timer needed */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
/* Handle the mgmt Start Discovery command.
 *
 * Builds an HCI request for the requested discovery type: a BR/EDR
 * inquiry, an LE active scan, or both (interleaved).  The request runs
 * asynchronously; start_discovery_complete() finishes the state change.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_status(sk, hdev->id,
						 MGMT_OP_START_DISCOVERY,
						 MGMT_STATUS_REJECTED);
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3845 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3847 struct pending_cmd *cmd;
3848 int err;
3850 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3851 if (!cmd)
3852 return -ENOENT;
3854 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3855 &hdev->discovery.type, sizeof(hdev->discovery.type));
3856 mgmt_pending_remove(cmd);
3858 return err;
3861 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3863 BT_DBG("status %d", status);
3865 hci_dev_lock(hdev);
3867 if (status) {
3868 mgmt_stop_discovery_failed(hdev, status);
3869 goto unlock;
3872 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3874 unlock:
3875 hci_dev_unlock(hdev);
3878 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3879 u16 len)
3881 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3882 struct pending_cmd *cmd;
3883 struct hci_request req;
3884 int err;
3886 BT_DBG("%s", hdev->name);
3888 hci_dev_lock(hdev);
3890 if (!hci_discovery_active(hdev)) {
3891 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3892 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3893 sizeof(mgmt_cp->type));
3894 goto unlock;
3897 if (hdev->discovery.type != mgmt_cp->type) {
3898 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3899 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3900 sizeof(mgmt_cp->type));
3901 goto unlock;
3904 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3905 if (!cmd) {
3906 err = -ENOMEM;
3907 goto unlock;
3910 hci_req_init(&req, hdev);
3912 hci_stop_discovery(&req);
3914 err = hci_req_run(&req, stop_discovery_complete);
3915 if (!err) {
3916 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3917 goto unlock;
3920 mgmt_pending_remove(cmd);
3922 /* If no HCI commands were sent we're done */
3923 if (err == -ENODATA) {
3924 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3925 &mgmt_cp->type, sizeof(mgmt_cp->type));
3926 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3929 unlock:
3930 hci_dev_unlock(hdev);
3931 return err;
3934 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3935 u16 len)
3937 struct mgmt_cp_confirm_name *cp = data;
3938 struct inquiry_entry *e;
3939 int err;
3941 BT_DBG("%s", hdev->name);
3943 hci_dev_lock(hdev);
3945 if (!hci_discovery_active(hdev)) {
3946 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3947 MGMT_STATUS_FAILED, &cp->addr,
3948 sizeof(cp->addr));
3949 goto failed;
3952 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3953 if (!e) {
3954 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3955 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3956 sizeof(cp->addr));
3957 goto failed;
3960 if (cp->name_known) {
3961 e->name_state = NAME_KNOWN;
3962 list_del(&e->list);
3963 } else {
3964 e->name_state = NAME_NEEDED;
3965 hci_inquiry_cache_update_resolve(hdev, e);
3968 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3969 sizeof(cp->addr));
3971 failed:
3972 hci_dev_unlock(hdev);
3973 return err;
3976 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3977 u16 len)
3979 struct mgmt_cp_block_device *cp = data;
3980 u8 status;
3981 int err;
3983 BT_DBG("%s", hdev->name);
3985 if (!bdaddr_type_is_valid(cp->addr.type))
3986 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3987 MGMT_STATUS_INVALID_PARAMS,
3988 &cp->addr, sizeof(cp->addr));
3990 hci_dev_lock(hdev);
3992 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3993 cp->addr.type);
3994 if (err < 0) {
3995 status = MGMT_STATUS_FAILED;
3996 goto done;
3999 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4000 sk);
4001 status = MGMT_STATUS_SUCCESS;
4003 done:
4004 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4005 &cp->addr, sizeof(cp->addr));
4007 hci_dev_unlock(hdev);
4009 return err;
4012 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4013 u16 len)
4015 struct mgmt_cp_unblock_device *cp = data;
4016 u8 status;
4017 int err;
4019 BT_DBG("%s", hdev->name);
4021 if (!bdaddr_type_is_valid(cp->addr.type))
4022 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4023 MGMT_STATUS_INVALID_PARAMS,
4024 &cp->addr, sizeof(cp->addr));
4026 hci_dev_lock(hdev);
4028 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4029 cp->addr.type);
4030 if (err < 0) {
4031 status = MGMT_STATUS_INVALID_PARAMS;
4032 goto done;
4035 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4036 sk);
4037 status = MGMT_STATUS_SUCCESS;
4039 done:
4040 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4041 &cp->addr, sizeof(cp->addr));
4043 hci_dev_unlock(hdev);
4045 return err;
4048 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4049 u16 len)
4051 struct mgmt_cp_set_device_id *cp = data;
4052 struct hci_request req;
4053 int err;
4054 __u16 source;
4056 BT_DBG("%s", hdev->name);
4058 source = __le16_to_cpu(cp->source);
4060 if (source > 0x0002)
4061 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4062 MGMT_STATUS_INVALID_PARAMS);
4064 hci_dev_lock(hdev);
4066 hdev->devid_source = source;
4067 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4068 hdev->devid_product = __le16_to_cpu(cp->product);
4069 hdev->devid_version = __le16_to_cpu(cp->version);
4071 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4073 hci_req_init(&req, hdev);
4074 update_eir(&req);
4075 hci_req_run(&req, NULL);
4077 hci_dev_unlock(hdev);
4079 return err;
4082 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4084 struct cmd_lookup match = { NULL, hdev };
4086 if (status) {
4087 u8 mgmt_err = mgmt_status(status);
4089 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4090 cmd_status_rsp, &mgmt_err);
4091 return;
4094 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4095 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4096 else
4097 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4099 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4100 &match);
4102 new_settings(hdev, match.sk);
4104 if (match.sk)
4105 sock_put(match.sk);
4108 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4109 u16 len)
4111 struct mgmt_mode *cp = data;
4112 struct pending_cmd *cmd;
4113 struct hci_request req;
4114 u8 val, enabled, status;
4115 int err;
4117 BT_DBG("request for %s", hdev->name);
4119 status = mgmt_le_support(hdev);
4120 if (status)
4121 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4122 status);
4124 if (cp->val != 0x00 && cp->val != 0x01)
4125 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4126 MGMT_STATUS_INVALID_PARAMS);
4128 hci_dev_lock(hdev);
4130 val = !!cp->val;
4131 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4133 /* The following conditions are ones which mean that we should
4134 * not do any HCI communication but directly send a mgmt
4135 * response to user space (after toggling the flag if
4136 * necessary).
4138 if (!hdev_is_powered(hdev) || val == enabled ||
4139 hci_conn_num(hdev, LE_LINK) > 0 ||
4140 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4141 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4142 bool changed = false;
4144 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4145 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4146 changed = true;
4149 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4150 if (err < 0)
4151 goto unlock;
4153 if (changed)
4154 err = new_settings(hdev, sk);
4156 goto unlock;
4159 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4160 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4161 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4162 MGMT_STATUS_BUSY);
4163 goto unlock;
4166 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4167 if (!cmd) {
4168 err = -ENOMEM;
4169 goto unlock;
4172 hci_req_init(&req, hdev);
4174 if (val)
4175 enable_advertising(&req);
4176 else
4177 disable_advertising(&req);
4179 err = hci_req_run(&req, set_advertising_complete);
4180 if (err < 0)
4181 mgmt_pending_remove(cmd);
4183 unlock:
4184 hci_dev_unlock(hdev);
4185 return err;
4188 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4189 void *data, u16 len)
4191 struct mgmt_cp_set_static_address *cp = data;
4192 int err;
4194 BT_DBG("%s", hdev->name);
4196 if (!lmp_le_capable(hdev))
4197 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4198 MGMT_STATUS_NOT_SUPPORTED);
4200 if (hdev_is_powered(hdev))
4201 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4202 MGMT_STATUS_REJECTED);
4204 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4205 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4206 return cmd_status(sk, hdev->id,
4207 MGMT_OP_SET_STATIC_ADDRESS,
4208 MGMT_STATUS_INVALID_PARAMS);
4210 /* Two most significant bits shall be set */
4211 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4212 return cmd_status(sk, hdev->id,
4213 MGMT_OP_SET_STATIC_ADDRESS,
4214 MGMT_STATUS_INVALID_PARAMS);
4217 hci_dev_lock(hdev);
4219 bacpy(&hdev->static_addr, &cp->bdaddr);
4221 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4223 hci_dev_unlock(hdev);
4225 return err;
4228 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4229 void *data, u16 len)
4231 struct mgmt_cp_set_scan_params *cp = data;
4232 __u16 interval, window;
4233 int err;
4235 BT_DBG("%s", hdev->name);
4237 if (!lmp_le_capable(hdev))
4238 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4239 MGMT_STATUS_NOT_SUPPORTED);
4241 interval = __le16_to_cpu(cp->interval);
4243 if (interval < 0x0004 || interval > 0x4000)
4244 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4245 MGMT_STATUS_INVALID_PARAMS);
4247 window = __le16_to_cpu(cp->window);
4249 if (window < 0x0004 || window > 0x4000)
4250 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4251 MGMT_STATUS_INVALID_PARAMS);
4253 if (window > interval)
4254 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4255 MGMT_STATUS_INVALID_PARAMS);
4257 hci_dev_lock(hdev);
4259 hdev->le_scan_interval = interval;
4260 hdev->le_scan_window = window;
4262 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4264 /* If background scan is running, restart it so new parameters are
4265 * loaded.
4267 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4268 hdev->discovery.state == DISCOVERY_STOPPED) {
4269 struct hci_request req;
4271 hci_req_init(&req, hdev);
4273 hci_req_add_le_scan_disable(&req);
4274 hci_req_add_le_passive_scan(&req);
4276 hci_req_run(&req, NULL);
4279 hci_dev_unlock(hdev);
4281 return err;
4284 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4286 struct pending_cmd *cmd;
4288 BT_DBG("status 0x%02x", status);
4290 hci_dev_lock(hdev);
4292 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4293 if (!cmd)
4294 goto unlock;
4296 if (status) {
4297 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4298 mgmt_status(status));
4299 } else {
4300 struct mgmt_mode *cp = cmd->param;
4302 if (cp->val)
4303 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4304 else
4305 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4307 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4308 new_settings(hdev, cmd->sk);
4311 mgmt_pending_remove(cmd);
4313 unlock:
4314 hci_dev_unlock(hdev);
4317 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4318 void *data, u16 len)
4320 struct mgmt_mode *cp = data;
4321 struct pending_cmd *cmd;
4322 struct hci_request req;
4323 int err;
4325 BT_DBG("%s", hdev->name);
4327 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4328 hdev->hci_ver < BLUETOOTH_VER_1_2)
4329 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4330 MGMT_STATUS_NOT_SUPPORTED);
4332 if (cp->val != 0x00 && cp->val != 0x01)
4333 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4334 MGMT_STATUS_INVALID_PARAMS);
4336 if (!hdev_is_powered(hdev))
4337 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4338 MGMT_STATUS_NOT_POWERED);
4340 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4341 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4342 MGMT_STATUS_REJECTED);
4344 hci_dev_lock(hdev);
4346 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4347 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4348 MGMT_STATUS_BUSY);
4349 goto unlock;
4352 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4353 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4354 hdev);
4355 goto unlock;
4358 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4359 data, len);
4360 if (!cmd) {
4361 err = -ENOMEM;
4362 goto unlock;
4365 hci_req_init(&req, hdev);
4367 write_fast_connectable(&req, cp->val);
4369 err = hci_req_run(&req, fast_connectable_complete);
4370 if (err < 0) {
4371 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4372 MGMT_STATUS_FAILED);
4373 mgmt_pending_remove(cmd);
4376 unlock:
4377 hci_dev_unlock(hdev);
4379 return err;
4382 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4384 struct pending_cmd *cmd;
4386 BT_DBG("status 0x%02x", status);
4388 hci_dev_lock(hdev);
4390 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4391 if (!cmd)
4392 goto unlock;
4394 if (status) {
4395 u8 mgmt_err = mgmt_status(status);
4397 /* We need to restore the flag if related HCI commands
4398 * failed.
4400 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4402 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4403 } else {
4404 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4405 new_settings(hdev, cmd->sk);
4408 mgmt_pending_remove(cmd);
4410 unlock:
4411 hci_dev_unlock(hdev);
4414 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4416 struct mgmt_mode *cp = data;
4417 struct pending_cmd *cmd;
4418 struct hci_request req;
4419 int err;
4421 BT_DBG("request for %s", hdev->name);
4423 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4424 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4425 MGMT_STATUS_NOT_SUPPORTED);
4427 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4428 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4429 MGMT_STATUS_REJECTED);
4431 if (cp->val != 0x00 && cp->val != 0x01)
4432 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4433 MGMT_STATUS_INVALID_PARAMS);
4435 hci_dev_lock(hdev);
4437 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4438 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4439 goto unlock;
4442 if (!hdev_is_powered(hdev)) {
4443 if (!cp->val) {
4444 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4445 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4446 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4447 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4448 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4451 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4453 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4454 if (err < 0)
4455 goto unlock;
4457 err = new_settings(hdev, sk);
4458 goto unlock;
4461 /* Reject disabling when powered on */
4462 if (!cp->val) {
4463 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4464 MGMT_STATUS_REJECTED);
4465 goto unlock;
4468 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4469 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4470 MGMT_STATUS_BUSY);
4471 goto unlock;
4474 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4475 if (!cmd) {
4476 err = -ENOMEM;
4477 goto unlock;
4480 /* We need to flip the bit already here so that update_adv_data
4481 * generates the correct flags.
4483 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4485 hci_req_init(&req, hdev);
4487 write_fast_connectable(&req, false);
4488 hci_update_page_scan(hdev, &req);
4490 /* Since only the advertising data flags will change, there
4491 * is no need to update the scan response data.
4493 update_adv_data(&req);
4495 err = hci_req_run(&req, set_bredr_complete);
4496 if (err < 0)
4497 mgmt_pending_remove(cmd);
4499 unlock:
4500 hci_dev_unlock(hdev);
4501 return err;
4504 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4505 void *data, u16 len)
4507 struct mgmt_mode *cp = data;
4508 struct pending_cmd *cmd;
4509 u8 val, status;
4510 int err;
4512 BT_DBG("request for %s", hdev->name);
4514 status = mgmt_bredr_support(hdev);
4515 if (status)
4516 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4517 status);
4519 if (!lmp_sc_capable(hdev) &&
4520 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4521 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4522 MGMT_STATUS_NOT_SUPPORTED);
4524 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4525 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4526 MGMT_STATUS_INVALID_PARAMS);
4528 hci_dev_lock(hdev);
4530 if (!hdev_is_powered(hdev)) {
4531 bool changed;
4533 if (cp->val) {
4534 changed = !test_and_set_bit(HCI_SC_ENABLED,
4535 &hdev->dev_flags);
4536 if (cp->val == 0x02)
4537 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4538 else
4539 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4540 } else {
4541 changed = test_and_clear_bit(HCI_SC_ENABLED,
4542 &hdev->dev_flags);
4543 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4546 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4547 if (err < 0)
4548 goto failed;
4550 if (changed)
4551 err = new_settings(hdev, sk);
4553 goto failed;
4556 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4557 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4558 MGMT_STATUS_BUSY);
4559 goto failed;
4562 val = !!cp->val;
4564 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4565 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4566 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4567 goto failed;
4570 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4571 if (!cmd) {
4572 err = -ENOMEM;
4573 goto failed;
4576 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4577 if (err < 0) {
4578 mgmt_pending_remove(cmd);
4579 goto failed;
4582 if (cp->val == 0x02)
4583 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4584 else
4585 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4587 failed:
4588 hci_dev_unlock(hdev);
4589 return err;
4592 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4593 void *data, u16 len)
4595 struct mgmt_mode *cp = data;
4596 bool changed, use_changed;
4597 int err;
4599 BT_DBG("request for %s", hdev->name);
4601 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4602 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4603 MGMT_STATUS_INVALID_PARAMS);
4605 hci_dev_lock(hdev);
4607 if (cp->val)
4608 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4609 &hdev->dev_flags);
4610 else
4611 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4612 &hdev->dev_flags);
4614 if (cp->val == 0x02)
4615 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4616 &hdev->dev_flags);
4617 else
4618 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4619 &hdev->dev_flags);
4621 if (hdev_is_powered(hdev) && use_changed &&
4622 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4623 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4624 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4625 sizeof(mode), &mode);
4628 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4629 if (err < 0)
4630 goto unlock;
4632 if (changed)
4633 err = new_settings(hdev, sk);
4635 unlock:
4636 hci_dev_unlock(hdev);
4637 return err;
4640 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4641 u16 len)
4643 struct mgmt_cp_set_privacy *cp = cp_data;
4644 bool changed;
4645 int err;
4647 BT_DBG("request for %s", hdev->name);
4649 if (!lmp_le_capable(hdev))
4650 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4651 MGMT_STATUS_NOT_SUPPORTED);
4653 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4654 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4655 MGMT_STATUS_INVALID_PARAMS);
4657 if (hdev_is_powered(hdev))
4658 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4659 MGMT_STATUS_REJECTED);
4661 hci_dev_lock(hdev);
4663 /* If user space supports this command it is also expected to
4664 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4666 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4668 if (cp->privacy) {
4669 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4670 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4671 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4672 } else {
4673 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4674 memset(hdev->irk, 0, sizeof(hdev->irk));
4675 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4678 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4679 if (err < 0)
4680 goto unlock;
4682 if (changed)
4683 err = new_settings(hdev, sk);
4685 unlock:
4686 hci_dev_unlock(hdev);
4687 return err;
4690 static bool irk_is_valid(struct mgmt_irk_info *irk)
4692 switch (irk->addr.type) {
4693 case BDADDR_LE_PUBLIC:
4694 return true;
4696 case BDADDR_LE_RANDOM:
4697 /* Two most significant bits shall be set */
4698 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4699 return false;
4700 return true;
4703 return false;
4706 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4707 u16 len)
4709 struct mgmt_cp_load_irks *cp = cp_data;
4710 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4711 sizeof(struct mgmt_irk_info));
4712 u16 irk_count, expected_len;
4713 int i, err;
4715 BT_DBG("request for %s", hdev->name);
4717 if (!lmp_le_capable(hdev))
4718 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4719 MGMT_STATUS_NOT_SUPPORTED);
4721 irk_count = __le16_to_cpu(cp->irk_count);
4722 if (irk_count > max_irk_count) {
4723 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4724 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4725 MGMT_STATUS_INVALID_PARAMS);
4728 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4729 if (expected_len != len) {
4730 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4731 expected_len, len);
4732 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4733 MGMT_STATUS_INVALID_PARAMS);
4736 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4738 for (i = 0; i < irk_count; i++) {
4739 struct mgmt_irk_info *key = &cp->irks[i];
4741 if (!irk_is_valid(key))
4742 return cmd_status(sk, hdev->id,
4743 MGMT_OP_LOAD_IRKS,
4744 MGMT_STATUS_INVALID_PARAMS);
4747 hci_dev_lock(hdev);
4749 hci_smp_irks_clear(hdev);
4751 for (i = 0; i < irk_count; i++) {
4752 struct mgmt_irk_info *irk = &cp->irks[i];
4753 u8 addr_type;
4755 if (irk->addr.type == BDADDR_LE_PUBLIC)
4756 addr_type = ADDR_LE_DEV_PUBLIC;
4757 else
4758 addr_type = ADDR_LE_DEV_RANDOM;
4760 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4761 BDADDR_ANY);
4764 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4766 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4768 hci_dev_unlock(hdev);
4770 return err;
4773 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4775 if (key->master != 0x00 && key->master != 0x01)
4776 return false;
4778 switch (key->addr.type) {
4779 case BDADDR_LE_PUBLIC:
4780 return true;
4782 case BDADDR_LE_RANDOM:
4783 /* Two most significant bits shall be set */
4784 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4785 return false;
4786 return true;
4789 return false;
4792 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4793 void *cp_data, u16 len)
4795 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4796 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4797 sizeof(struct mgmt_ltk_info));
4798 u16 key_count, expected_len;
4799 int i, err;
4801 BT_DBG("request for %s", hdev->name);
4803 if (!lmp_le_capable(hdev))
4804 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4805 MGMT_STATUS_NOT_SUPPORTED);
4807 key_count = __le16_to_cpu(cp->key_count);
4808 if (key_count > max_key_count) {
4809 BT_ERR("load_ltks: too big key_count value %u", key_count);
4810 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4811 MGMT_STATUS_INVALID_PARAMS);
4814 expected_len = sizeof(*cp) + key_count *
4815 sizeof(struct mgmt_ltk_info);
4816 if (expected_len != len) {
4817 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4818 expected_len, len);
4819 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4820 MGMT_STATUS_INVALID_PARAMS);
4823 BT_DBG("%s key_count %u", hdev->name, key_count);
4825 for (i = 0; i < key_count; i++) {
4826 struct mgmt_ltk_info *key = &cp->keys[i];
4828 if (!ltk_is_valid(key))
4829 return cmd_status(sk, hdev->id,
4830 MGMT_OP_LOAD_LONG_TERM_KEYS,
4831 MGMT_STATUS_INVALID_PARAMS);
4834 hci_dev_lock(hdev);
4836 hci_smp_ltks_clear(hdev);
4838 for (i = 0; i < key_count; i++) {
4839 struct mgmt_ltk_info *key = &cp->keys[i];
4840 u8 type, addr_type, authenticated;
4842 if (key->addr.type == BDADDR_LE_PUBLIC)
4843 addr_type = ADDR_LE_DEV_PUBLIC;
4844 else
4845 addr_type = ADDR_LE_DEV_RANDOM;
4847 if (key->master)
4848 type = SMP_LTK;
4849 else
4850 type = SMP_LTK_SLAVE;
4852 switch (key->type) {
4853 case MGMT_LTK_UNAUTHENTICATED:
4854 authenticated = 0x00;
4855 break;
4856 case MGMT_LTK_AUTHENTICATED:
4857 authenticated = 0x01;
4858 break;
4859 default:
4860 continue;
4863 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4864 authenticated, key->val, key->enc_size, key->ediv,
4865 key->rand);
4868 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4869 NULL, 0);
4871 hci_dev_unlock(hdev);
4873 return err;
/* Context passed to get_conn_info_complete() when walking the pending
 * Get Connection Info commands after an RSSI/TX-power refresh.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the refresh was for */
	bool valid_tx_power;		/* TX power fields in hci_conn are usable */
	u8 mgmt_status;			/* mgmt status to report to user space */
};
/* mgmt_pending_foreach() callback: complete one pending Get Connection
 * Info command if it refers to the connection in @data (a struct
 * cmd_conn_lookup). Replies with cached RSSI/TX-power values on
 * success, releases the connection references taken when the command
 * was queued, and removes the pending entry.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer commands queued for this particular connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the hold + reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
/* HCI request completion handler for the RSSI/TX-power refresh issued
 * by get_conn_info(). Determines which connection the request was for
 * from the last sent HCI command and answers all matching pending Get
 * Connection Info commands.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT Get Connection Info command.
 *
 * If the cached RSSI/TX-power values for the connection are fresh, they
 * are returned immediately. Otherwise an HCI request is issued to read
 * RSSI (and, where still unknown, current and max TX power); the reply
 * is then sent from conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* NOTE(review): the request is run before the pending cmd is
		 * added; if mgmt_pending_add() fails below, the completion
		 * handler will find no matching command — confirm this
		 * ordering is intentional.
		 */
		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped in get_conn_info_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion handler for Get Clock Info. Recovers the
 * connection (if any) from the sent Read Clock command, finds the
 * matching pending command, and replies with the local (and, when a
 * connection was involved, piconet) clock values.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was queued with the connection as user_data */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On error, reply with zeroed clock values and the mapped status */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the references taken when the command was queued */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5144 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5145 u16 len)
5147 struct mgmt_cp_get_clock_info *cp = data;
5148 struct mgmt_rp_get_clock_info rp;
5149 struct hci_cp_read_clock hci_cp;
5150 struct pending_cmd *cmd;
5151 struct hci_request req;
5152 struct hci_conn *conn;
5153 int err;
5155 BT_DBG("%s", hdev->name);
5157 memset(&rp, 0, sizeof(rp));
5158 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5159 rp.addr.type = cp->addr.type;
5161 if (cp->addr.type != BDADDR_BREDR)
5162 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5163 MGMT_STATUS_INVALID_PARAMS,
5164 &rp, sizeof(rp));
5166 hci_dev_lock(hdev);
5168 if (!hdev_is_powered(hdev)) {
5169 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5170 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5171 goto unlock;
5174 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5175 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5176 &cp->addr.bdaddr);
5177 if (!conn || conn->state != BT_CONNECTED) {
5178 err = cmd_complete(sk, hdev->id,
5179 MGMT_OP_GET_CLOCK_INFO,
5180 MGMT_STATUS_NOT_CONNECTED,
5181 &rp, sizeof(rp));
5182 goto unlock;
5184 } else {
5185 conn = NULL;
5188 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5189 if (!cmd) {
5190 err = -ENOMEM;
5191 goto unlock;
5194 hci_req_init(&req, hdev);
5196 memset(&hci_cp, 0, sizeof(hci_cp));
5197 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5199 if (conn) {
5200 hci_conn_hold(conn);
5201 cmd->user_data = hci_conn_get(conn);
5203 hci_cp.handle = cpu_to_le16(conn->handle);
5204 hci_cp.which = 0x01; /* Piconet clock */
5205 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5208 err = hci_req_run(&req, get_clock_info_complete);
5209 if (err < 0)
5210 mgmt_pending_remove(cmd);
5212 unlock:
5213 hci_dev_unlock(hdev);
5214 return err;
5217 static void device_added(struct sock *sk, struct hci_dev *hdev,
5218 bdaddr_t *bdaddr, u8 type, u8 action)
5220 struct mgmt_ev_device_added ev;
5222 bacpy(&ev.addr.bdaddr, bdaddr);
5223 ev.addr.type = type;
5224 ev.action = action;
5226 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5229 static int add_device(struct sock *sk, struct hci_dev *hdev,
5230 void *data, u16 len)
5232 struct mgmt_cp_add_device *cp = data;
5233 u8 auto_conn, addr_type;
5234 int err;
5236 BT_DBG("%s", hdev->name);
5238 if (!bdaddr_type_is_valid(cp->addr.type) ||
5239 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5240 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5241 MGMT_STATUS_INVALID_PARAMS,
5242 &cp->addr, sizeof(cp->addr));
5244 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5245 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5246 MGMT_STATUS_INVALID_PARAMS,
5247 &cp->addr, sizeof(cp->addr));
5249 hci_dev_lock(hdev);
5251 if (cp->addr.type == BDADDR_BREDR) {
5252 /* Only incoming connections action is supported for now */
5253 if (cp->action != 0x01) {
5254 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5255 MGMT_STATUS_INVALID_PARAMS,
5256 &cp->addr, sizeof(cp->addr));
5257 goto unlock;
5260 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5261 cp->addr.type);
5262 if (err)
5263 goto unlock;
5265 hci_update_page_scan(hdev, NULL);
5267 goto added;
5270 if (cp->addr.type == BDADDR_LE_PUBLIC)
5271 addr_type = ADDR_LE_DEV_PUBLIC;
5272 else
5273 addr_type = ADDR_LE_DEV_RANDOM;
5275 if (cp->action == 0x02)
5276 auto_conn = HCI_AUTO_CONN_ALWAYS;
5277 else if (cp->action == 0x01)
5278 auto_conn = HCI_AUTO_CONN_DIRECT;
5279 else
5280 auto_conn = HCI_AUTO_CONN_REPORT;
5282 /* If the connection parameters don't exist for this device,
5283 * they will be created and configured with defaults.
5285 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5286 auto_conn) < 0) {
5287 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5288 MGMT_STATUS_FAILED,
5289 &cp->addr, sizeof(cp->addr));
5290 goto unlock;
5293 added:
5294 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5296 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5297 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5299 unlock:
5300 hci_dev_unlock(hdev);
5301 return err;
5304 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5305 bdaddr_t *bdaddr, u8 type)
5307 struct mgmt_ev_device_removed ev;
5309 bacpy(&ev.addr.bdaddr, bdaddr);
5310 ev.addr.type = type;
5312 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5315 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5316 void *data, u16 len)
5318 struct mgmt_cp_remove_device *cp = data;
5319 int err;
5321 BT_DBG("%s", hdev->name);
5323 hci_dev_lock(hdev);
5325 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5326 struct hci_conn_params *params;
5327 u8 addr_type;
5329 if (!bdaddr_type_is_valid(cp->addr.type)) {
5330 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5331 MGMT_STATUS_INVALID_PARAMS,
5332 &cp->addr, sizeof(cp->addr));
5333 goto unlock;
5336 if (cp->addr.type == BDADDR_BREDR) {
5337 err = hci_bdaddr_list_del(&hdev->whitelist,
5338 &cp->addr.bdaddr,
5339 cp->addr.type);
5340 if (err) {
5341 err = cmd_complete(sk, hdev->id,
5342 MGMT_OP_REMOVE_DEVICE,
5343 MGMT_STATUS_INVALID_PARAMS,
5344 &cp->addr, sizeof(cp->addr));
5345 goto unlock;
5348 hci_update_page_scan(hdev, NULL);
5350 device_removed(sk, hdev, &cp->addr.bdaddr,
5351 cp->addr.type);
5352 goto complete;
5355 if (cp->addr.type == BDADDR_LE_PUBLIC)
5356 addr_type = ADDR_LE_DEV_PUBLIC;
5357 else
5358 addr_type = ADDR_LE_DEV_RANDOM;
5360 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5361 addr_type);
5362 if (!params) {
5363 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5364 MGMT_STATUS_INVALID_PARAMS,
5365 &cp->addr, sizeof(cp->addr));
5366 goto unlock;
5369 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5370 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5371 MGMT_STATUS_INVALID_PARAMS,
5372 &cp->addr, sizeof(cp->addr));
5373 goto unlock;
5376 list_del(&params->action);
5377 list_del(&params->list);
5378 kfree(params);
5379 hci_update_background_scan(hdev);
5381 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
5382 } else {
5383 struct hci_conn_params *p, *tmp;
5384 struct bdaddr_list *b, *btmp;
5386 if (cp->addr.type) {
5387 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5388 MGMT_STATUS_INVALID_PARAMS,
5389 &cp->addr, sizeof(cp->addr));
5390 goto unlock;
5393 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5394 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5395 list_del(&b->list);
5396 kfree(b);
5399 hci_update_page_scan(hdev, NULL);
5401 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5402 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5403 continue;
5404 device_removed(sk, hdev, &p->addr, p->addr_type);
5405 list_del(&p->action);
5406 list_del(&p->list);
5407 kfree(p);
5410 BT_DBG("All LE connection parameters were removed");
5412 hci_update_background_scan(hdev);
5415 complete:
5416 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5417 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5419 unlock:
5420 hci_dev_unlock(hdev);
5421 return err;
5424 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5425 u16 len)
5427 struct mgmt_cp_load_conn_param *cp = data;
5428 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5429 sizeof(struct mgmt_conn_param));
5430 u16 param_count, expected_len;
5431 int i;
5433 if (!lmp_le_capable(hdev))
5434 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5435 MGMT_STATUS_NOT_SUPPORTED);
5437 param_count = __le16_to_cpu(cp->param_count);
5438 if (param_count > max_param_count) {
5439 BT_ERR("load_conn_param: too big param_count value %u",
5440 param_count);
5441 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5442 MGMT_STATUS_INVALID_PARAMS);
5445 expected_len = sizeof(*cp) + param_count *
5446 sizeof(struct mgmt_conn_param);
5447 if (expected_len != len) {
5448 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5449 expected_len, len);
5450 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5451 MGMT_STATUS_INVALID_PARAMS);
5454 BT_DBG("%s param_count %u", hdev->name, param_count);
5456 hci_dev_lock(hdev);
5458 hci_conn_params_clear_disabled(hdev);
5460 for (i = 0; i < param_count; i++) {
5461 struct mgmt_conn_param *param = &cp->params[i];
5462 struct hci_conn_params *hci_param;
5463 u16 min, max, latency, timeout;
5464 u8 addr_type;
5466 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5467 param->addr.type);
5469 if (param->addr.type == BDADDR_LE_PUBLIC) {
5470 addr_type = ADDR_LE_DEV_PUBLIC;
5471 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5472 addr_type = ADDR_LE_DEV_RANDOM;
5473 } else {
5474 BT_ERR("Ignoring invalid connection parameters");
5475 continue;
5478 min = le16_to_cpu(param->min_interval);
5479 max = le16_to_cpu(param->max_interval);
5480 latency = le16_to_cpu(param->latency);
5481 timeout = le16_to_cpu(param->timeout);
5483 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5484 min, max, latency, timeout);
5486 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5487 BT_ERR("Ignoring invalid connection parameters");
5488 continue;
5491 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5492 addr_type);
5493 if (!hci_param) {
5494 BT_ERR("Failed to add connection parameters");
5495 continue;
5498 hci_param->conn_min_interval = min;
5499 hci_param->conn_max_interval = max;
5500 hci_param->conn_latency = latency;
5501 hci_param->supervision_timeout = timeout;
5504 hci_dev_unlock(hdev);
5506 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5509 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5510 void *data, u16 len)
5512 struct mgmt_cp_set_external_config *cp = data;
5513 bool changed;
5514 int err;
5516 BT_DBG("%s", hdev->name);
5518 if (hdev_is_powered(hdev))
5519 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5520 MGMT_STATUS_REJECTED);
5522 if (cp->config != 0x00 && cp->config != 0x01)
5523 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5524 MGMT_STATUS_INVALID_PARAMS);
5526 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5527 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5528 MGMT_STATUS_NOT_SUPPORTED);
5530 hci_dev_lock(hdev);
5532 if (cp->config)
5533 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5534 &hdev->dev_flags);
5535 else
5536 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5537 &hdev->dev_flags);
5539 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5540 if (err < 0)
5541 goto unlock;
5543 if (!changed)
5544 goto unlock;
5546 err = new_options(hdev, sk);
5548 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5549 mgmt_index_removed(hdev);
5551 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5552 set_bit(HCI_CONFIG, &hdev->dev_flags);
5553 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5555 queue_work(hdev->req_workqueue, &hdev->power_on);
5556 } else {
5557 set_bit(HCI_RAW, &hdev->flags);
5558 mgmt_index_added(hdev);
5562 unlock:
5563 hci_dev_unlock(hdev);
5564 return err;
5567 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5568 void *data, u16 len)
5570 struct mgmt_cp_set_public_address *cp = data;
5571 bool changed;
5572 int err;
5574 BT_DBG("%s", hdev->name);
5576 if (hdev_is_powered(hdev))
5577 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5578 MGMT_STATUS_REJECTED);
5580 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5581 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5582 MGMT_STATUS_INVALID_PARAMS);
5584 if (!hdev->set_bdaddr)
5585 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5586 MGMT_STATUS_NOT_SUPPORTED);
5588 hci_dev_lock(hdev);
5590 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5591 bacpy(&hdev->public_addr, &cp->bdaddr);
5593 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5594 if (err < 0)
5595 goto unlock;
5597 if (!changed)
5598 goto unlock;
5600 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5601 err = new_options(hdev, sk);
5603 if (is_configured(hdev)) {
5604 mgmt_index_removed(hdev);
5606 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5608 set_bit(HCI_CONFIG, &hdev->dev_flags);
5609 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5611 queue_work(hdev->req_workqueue, &hdev->power_on);
5614 unlock:
5615 hci_dev_unlock(hdev);
5616 return err;
5619 static const struct mgmt_handler {
5620 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5621 u16 data_len);
5622 bool var_len;
5623 size_t data_len;
5624 } mgmt_handlers[] = {
5625 { NULL }, /* 0x0000 (no command) */
5626 { read_version, false, MGMT_READ_VERSION_SIZE },
5627 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5628 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5629 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5630 { set_powered, false, MGMT_SETTING_SIZE },
5631 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5632 { set_connectable, false, MGMT_SETTING_SIZE },
5633 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5634 { set_bondable, false, MGMT_SETTING_SIZE },
5635 { set_link_security, false, MGMT_SETTING_SIZE },
5636 { set_ssp, false, MGMT_SETTING_SIZE },
5637 { set_hs, false, MGMT_SETTING_SIZE },
5638 { set_le, false, MGMT_SETTING_SIZE },
5639 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5640 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5641 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5642 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5643 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5644 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5645 { disconnect, false, MGMT_DISCONNECT_SIZE },
5646 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5647 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5648 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5649 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5650 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5651 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5652 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5653 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5654 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5655 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5656 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5657 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5658 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5659 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5660 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5661 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5662 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5663 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5664 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5665 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5666 { set_advertising, false, MGMT_SETTING_SIZE },
5667 { set_bredr, false, MGMT_SETTING_SIZE },
5668 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5669 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5670 { set_secure_conn, false, MGMT_SETTING_SIZE },
5671 { set_debug_keys, false, MGMT_SETTING_SIZE },
5672 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5673 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5674 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5675 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5676 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5677 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5678 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5679 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5680 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5681 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5682 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
5685 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5687 void *buf;
5688 u8 *cp;
5689 struct mgmt_hdr *hdr;
5690 u16 opcode, index, len;
5691 struct hci_dev *hdev = NULL;
5692 const struct mgmt_handler *handler;
5693 int err;
5695 BT_DBG("got %zu bytes", msglen);
5697 if (msglen < sizeof(*hdr))
5698 return -EINVAL;
5700 buf = kmalloc(msglen, GFP_KERNEL);
5701 if (!buf)
5702 return -ENOMEM;
5704 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5705 err = -EFAULT;
5706 goto done;
5709 hdr = buf;
5710 opcode = __le16_to_cpu(hdr->opcode);
5711 index = __le16_to_cpu(hdr->index);
5712 len = __le16_to_cpu(hdr->len);
5714 if (len != msglen - sizeof(*hdr)) {
5715 err = -EINVAL;
5716 goto done;
5719 if (index != MGMT_INDEX_NONE) {
5720 hdev = hci_dev_get(index);
5721 if (!hdev) {
5722 err = cmd_status(sk, index, opcode,
5723 MGMT_STATUS_INVALID_INDEX);
5724 goto done;
5727 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5728 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5729 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5730 err = cmd_status(sk, index, opcode,
5731 MGMT_STATUS_INVALID_INDEX);
5732 goto done;
5735 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5736 opcode != MGMT_OP_READ_CONFIG_INFO &&
5737 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5738 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5739 err = cmd_status(sk, index, opcode,
5740 MGMT_STATUS_INVALID_INDEX);
5741 goto done;
5745 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5746 mgmt_handlers[opcode].func == NULL) {
5747 BT_DBG("Unknown op %u", opcode);
5748 err = cmd_status(sk, index, opcode,
5749 MGMT_STATUS_UNKNOWN_COMMAND);
5750 goto done;
5753 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5754 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5755 err = cmd_status(sk, index, opcode,
5756 MGMT_STATUS_INVALID_INDEX);
5757 goto done;
5760 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5761 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5762 err = cmd_status(sk, index, opcode,
5763 MGMT_STATUS_INVALID_INDEX);
5764 goto done;
5767 handler = &mgmt_handlers[opcode];
5769 if ((handler->var_len && len < handler->data_len) ||
5770 (!handler->var_len && len != handler->data_len)) {
5771 err = cmd_status(sk, index, opcode,
5772 MGMT_STATUS_INVALID_PARAMS);
5773 goto done;
5776 if (hdev)
5777 mgmt_init_hdev(sk, hdev);
5779 cp = buf + sizeof(*hdr);
5781 err = handler->func(sk, hdev, cp, len);
5782 if (err < 0)
5783 goto done;
5785 err = msglen;
5787 done:
5788 if (hdev)
5789 hci_dev_put(hdev);
5791 kfree(buf);
5792 return err;
5795 void mgmt_index_added(struct hci_dev *hdev)
5797 if (hdev->dev_type != HCI_BREDR)
5798 return;
5800 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5801 return;
5803 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5804 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5805 else
5806 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5809 void mgmt_index_removed(struct hci_dev *hdev)
5811 u8 status = MGMT_STATUS_INVALID_INDEX;
5813 if (hdev->dev_type != HCI_BREDR)
5814 return;
5816 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5817 return;
5819 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5821 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5822 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5823 else
5824 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5827 /* This function requires the caller holds hdev->lock */
5828 static void restart_le_actions(struct hci_dev *hdev)
5830 struct hci_conn_params *p;
5832 list_for_each_entry(p, &hdev->le_conn_params, list) {
5833 /* Needed for AUTO_OFF case where might not "really"
5834 * have been powered off.
5836 list_del_init(&p->action);
5838 switch (p->auto_connect) {
5839 case HCI_AUTO_CONN_DIRECT:
5840 case HCI_AUTO_CONN_ALWAYS:
5841 list_add(&p->action, &hdev->pend_le_conns);
5842 break;
5843 case HCI_AUTO_CONN_REPORT:
5844 list_add(&p->action, &hdev->pend_le_reports);
5845 break;
5846 default:
5847 break;
5851 hci_update_background_scan(hdev);
5854 static void powered_complete(struct hci_dev *hdev, u8 status)
5856 struct cmd_lookup match = { NULL, hdev };
5858 BT_DBG("status 0x%02x", status);
5860 hci_dev_lock(hdev);
5862 restart_le_actions(hdev);
5864 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5866 new_settings(hdev, match.sk);
5868 hci_dev_unlock(hdev);
5870 if (match.sk)
5871 sock_put(match.sk);
5874 static int powered_update_hci(struct hci_dev *hdev)
5876 struct hci_request req;
5877 u8 link_sec;
5879 hci_req_init(&req, hdev);
5881 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5882 !lmp_host_ssp_capable(hdev)) {
5883 u8 ssp = 1;
5885 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5888 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5889 lmp_bredr_capable(hdev)) {
5890 struct hci_cp_write_le_host_supported cp;
5892 cp.le = 0x01;
5893 cp.simul = 0x00;
5895 /* Check first if we already have the right
5896 * host state (host features set)
5898 if (cp.le != lmp_host_le_capable(hdev) ||
5899 cp.simul != lmp_host_le_br_capable(hdev))
5900 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5901 sizeof(cp), &cp);
5904 if (lmp_le_capable(hdev)) {
5905 /* Make sure the controller has a good default for
5906 * advertising data. This also applies to the case
5907 * where BR/EDR was toggled during the AUTO_OFF phase.
5909 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5910 update_adv_data(&req);
5911 update_scan_rsp_data(&req);
5914 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5915 enable_advertising(&req);
5918 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5919 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5920 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5921 sizeof(link_sec), &link_sec);
5923 if (lmp_bredr_capable(hdev)) {
5924 write_fast_connectable(&req, false);
5925 hci_update_page_scan(hdev, &req);
5926 update_class(&req);
5927 update_name(&req);
5928 update_eir(&req);
5931 return hci_req_run(&req, powered_complete);
5934 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5936 struct cmd_lookup match = { NULL, hdev };
5937 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5938 u8 zero_cod[] = { 0, 0, 0 };
5939 int err;
5941 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5942 return 0;
5944 if (powered) {
5945 if (powered_update_hci(hdev) == 0)
5946 return 0;
5948 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5949 &match);
5950 goto new_settings;
5953 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5954 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5956 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5957 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5958 zero_cod, sizeof(zero_cod), NULL);
5960 new_settings:
5961 err = new_settings(hdev, match.sk);
5963 if (match.sk)
5964 sock_put(match.sk);
5966 return err;
5969 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5971 struct pending_cmd *cmd;
5972 u8 status;
5974 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5975 if (!cmd)
5976 return;
5978 if (err == -ERFKILL)
5979 status = MGMT_STATUS_RFKILLED;
5980 else
5981 status = MGMT_STATUS_FAILED;
5983 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5985 mgmt_pending_remove(cmd);
5988 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5990 struct hci_request req;
5992 hci_dev_lock(hdev);
5994 /* When discoverable timeout triggers, then just make sure
5995 * the limited discoverable flag is cleared. Even in the case
5996 * of a timeout triggered from general discoverable, it is
5997 * safe to unconditionally clear the flag.
5999 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
6000 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
6002 hci_req_init(&req, hdev);
6003 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
6004 u8 scan = SCAN_PAGE;
6005 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
6006 sizeof(scan), &scan);
6008 update_class(&req);
6009 update_adv_data(&req);
6010 hci_req_run(&req, NULL);
6012 hdev->discov_timeout = 0;
6014 new_settings(hdev, NULL);
6016 hci_dev_unlock(hdev);
6019 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6020 bool persistent)
6022 struct mgmt_ev_new_link_key ev;
6024 memset(&ev, 0, sizeof(ev));
6026 ev.store_hint = persistent;
6027 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6028 ev.key.addr.type = BDADDR_BREDR;
6029 ev.key.type = key->type;
6030 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6031 ev.key.pin_len = key->pin_len;
6033 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6036 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6038 if (ltk->authenticated)
6039 return MGMT_LTK_AUTHENTICATED;
6041 return MGMT_LTK_UNAUTHENTICATED;
6044 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6046 struct mgmt_ev_new_long_term_key ev;
6048 memset(&ev, 0, sizeof(ev));
6050 /* Devices using resolvable or non-resolvable random addresses
6051 * without providing an indentity resolving key don't require
6052 * to store long term keys. Their addresses will change the
6053 * next time around.
6055 * Only when a remote device provides an identity address
6056 * make sure the long term key is stored. If the remote
6057 * identity is known, the long term keys are internally
6058 * mapped to the identity address. So allow static random
6059 * and public addresses here.
6061 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6062 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6063 ev.store_hint = 0x00;
6064 else
6065 ev.store_hint = persistent;
6067 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6068 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6069 ev.key.type = mgmt_ltk_type(key);
6070 ev.key.enc_size = key->enc_size;
6071 ev.key.ediv = key->ediv;
6072 ev.key.rand = key->rand;
6074 if (key->type == SMP_LTK)
6075 ev.key.master = 1;
6077 memcpy(ev.key.val, key->val, sizeof(key->val));
6079 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6082 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6084 struct mgmt_ev_new_irk ev;
6086 memset(&ev, 0, sizeof(ev));
6088 /* For identity resolving keys from devices that are already
6089 * using a public address or static random address, do not
6090 * ask for storing this key. The identity resolving key really
6091 * is only mandatory for devices using resovlable random
6092 * addresses.
6094 * Storing all identity resolving keys has the downside that
6095 * they will be also loaded on next boot of they system. More
6096 * identity resolving keys, means more time during scanning is
6097 * needed to actually resolve these addresses.
6099 if (bacmp(&irk->rpa, BDADDR_ANY))
6100 ev.store_hint = 0x01;
6101 else
6102 ev.store_hint = 0x00;
6104 bacpy(&ev.rpa, &irk->rpa);
6105 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6106 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6107 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6109 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6112 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6113 bool persistent)
6115 struct mgmt_ev_new_csrk ev;
6117 memset(&ev, 0, sizeof(ev));
6119 /* Devices using resolvable or non-resolvable random addresses
6120 * without providing an indentity resolving key don't require
6121 * to store signature resolving keys. Their addresses will change
6122 * the next time around.
6124 * Only when a remote device provides an identity address
6125 * make sure the signature resolving key is stored. So allow
6126 * static random and public addresses here.
6128 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6129 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6130 ev.store_hint = 0x00;
6131 else
6132 ev.store_hint = persistent;
6134 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6135 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6136 ev.key.master = csrk->master;
6137 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6139 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6142 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6143 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6144 u16 max_interval, u16 latency, u16 timeout)
6146 struct mgmt_ev_new_conn_param ev;
6148 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6149 return;
6151 memset(&ev, 0, sizeof(ev));
6152 bacpy(&ev.addr.bdaddr, bdaddr);
6153 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6154 ev.store_hint = store_hint;
6155 ev.min_interval = cpu_to_le16(min_interval);
6156 ev.max_interval = cpu_to_le16(max_interval);
6157 ev.latency = cpu_to_le16(latency);
6158 ev.timeout = cpu_to_le16(timeout);
6160 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6163 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6164 u8 data_len)
6166 eir[eir_len++] = sizeof(type) + data_len;
6167 eir[eir_len++] = type;
6168 memcpy(&eir[eir_len], data, data_len);
6169 eir_len += data_len;
6171 return eir_len;
6174 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6175 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6176 u8 *dev_class)
6178 char buf[512];
6179 struct mgmt_ev_device_connected *ev = (void *) buf;
6180 u16 eir_len = 0;
6182 bacpy(&ev->addr.bdaddr, bdaddr);
6183 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6185 ev->flags = __cpu_to_le32(flags);
6187 if (name_len > 0)
6188 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6189 name, name_len);
6191 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6192 eir_len = eir_append_data(ev->eir, eir_len,
6193 EIR_CLASS_OF_DEV, dev_class, 3);
6195 ev->eir_len = cpu_to_le16(eir_len);
6197 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6198 sizeof(*ev) + eir_len, NULL);
6201 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6203 struct mgmt_cp_disconnect *cp = cmd->param;
6204 struct sock **sk = data;
6205 struct mgmt_rp_disconnect rp;
6207 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6208 rp.addr.type = cp->addr.type;
6210 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6211 sizeof(rp));
6213 *sk = cmd->sk;
6214 sock_hold(*sk);
6216 mgmt_pending_remove(cmd);
6219 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6221 struct hci_dev *hdev = data;
6222 struct mgmt_cp_unpair_device *cp = cmd->param;
6223 struct mgmt_rp_unpair_device rp;
6225 memset(&rp, 0, sizeof(rp));
6226 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6227 rp.addr.type = cp->addr.type;
6229 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6231 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6233 mgmt_pending_remove(cmd);
6236 bool mgmt_powering_down(struct hci_dev *hdev)
6238 struct pending_cmd *cmd;
6239 struct mgmt_mode *cp;
6241 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6242 if (!cmd)
6243 return false;
6245 cp = cmd->param;
6246 if (!cp->val)
6247 return true;
6249 return false;
6252 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6253 u8 link_type, u8 addr_type, u8 reason,
6254 bool mgmt_connected)
6256 struct mgmt_ev_device_disconnected ev;
6257 struct sock *sk = NULL;
6259 /* The connection is still in hci_conn_hash so test for 1
6260 * instead of 0 to know if this is the last one.
6262 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6263 cancel_delayed_work(&hdev->power_off);
6264 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6267 if (!mgmt_connected)
6268 return;
6270 if (link_type != ACL_LINK && link_type != LE_LINK)
6271 return;
6273 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6275 bacpy(&ev.addr.bdaddr, bdaddr);
6276 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6277 ev.reason = reason;
6279 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6281 if (sk)
6282 sock_put(sk);
6284 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6285 hdev);
6288 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6289 u8 link_type, u8 addr_type, u8 status)
6291 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6292 struct mgmt_cp_disconnect *cp;
6293 struct mgmt_rp_disconnect rp;
6294 struct pending_cmd *cmd;
6296 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6297 hdev);
6299 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6300 if (!cmd)
6301 return;
6303 cp = cmd->param;
6305 if (bacmp(bdaddr, &cp->addr.bdaddr))
6306 return;
6308 if (cp->addr.type != bdaddr_type)
6309 return;
6311 bacpy(&rp.addr.bdaddr, bdaddr);
6312 rp.addr.type = bdaddr_type;
6314 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6315 mgmt_status(status), &rp, sizeof(rp));
6317 mgmt_pending_remove(cmd);
6320 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6321 u8 addr_type, u8 status)
6323 struct mgmt_ev_connect_failed ev;
6325 /* The connection is still in hci_conn_hash so test for 1
6326 * instead of 0 to know if this is the last one.
6328 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6329 cancel_delayed_work(&hdev->power_off);
6330 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6333 bacpy(&ev.addr.bdaddr, bdaddr);
6334 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6335 ev.status = mgmt_status(status);
6337 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6340 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6342 struct mgmt_ev_pin_code_request ev;
6344 bacpy(&ev.addr.bdaddr, bdaddr);
6345 ev.addr.type = BDADDR_BREDR;
6346 ev.secure = secure;
6348 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6351 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6352 u8 status)
6354 struct pending_cmd *cmd;
6355 struct mgmt_rp_pin_code_reply rp;
6357 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6358 if (!cmd)
6359 return;
6361 bacpy(&rp.addr.bdaddr, bdaddr);
6362 rp.addr.type = BDADDR_BREDR;
6364 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6365 mgmt_status(status), &rp, sizeof(rp));
6367 mgmt_pending_remove(cmd);
6370 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6371 u8 status)
6373 struct pending_cmd *cmd;
6374 struct mgmt_rp_pin_code_reply rp;
6376 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6377 if (!cmd)
6378 return;
6380 bacpy(&rp.addr.bdaddr, bdaddr);
6381 rp.addr.type = BDADDR_BREDR;
6383 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6384 mgmt_status(status), &rp, sizeof(rp));
6386 mgmt_pending_remove(cmd);
6389 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6390 u8 link_type, u8 addr_type, u32 value,
6391 u8 confirm_hint)
6393 struct mgmt_ev_user_confirm_request ev;
6395 BT_DBG("%s", hdev->name);
6397 bacpy(&ev.addr.bdaddr, bdaddr);
6398 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6399 ev.confirm_hint = confirm_hint;
6400 ev.value = cpu_to_le32(value);
6402 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6403 NULL);
6406 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6407 u8 link_type, u8 addr_type)
6409 struct mgmt_ev_user_passkey_request ev;
6411 BT_DBG("%s", hdev->name);
6413 bacpy(&ev.addr.bdaddr, bdaddr);
6414 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6416 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6417 NULL);
6420 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6421 u8 link_type, u8 addr_type, u8 status,
6422 u8 opcode)
6424 struct pending_cmd *cmd;
6425 struct mgmt_rp_user_confirm_reply rp;
6426 int err;
6428 cmd = mgmt_pending_find(opcode, hdev);
6429 if (!cmd)
6430 return -ENOENT;
6432 bacpy(&rp.addr.bdaddr, bdaddr);
6433 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6434 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6435 &rp, sizeof(rp));
6437 mgmt_pending_remove(cmd);
6439 return err;
6442 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6443 u8 link_type, u8 addr_type, u8 status)
6445 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6446 status, MGMT_OP_USER_CONFIRM_REPLY);
6449 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6450 u8 link_type, u8 addr_type, u8 status)
6452 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6453 status,
6454 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6457 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6458 u8 link_type, u8 addr_type, u8 status)
6460 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6461 status, MGMT_OP_USER_PASSKEY_REPLY);
6464 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6465 u8 link_type, u8 addr_type, u8 status)
6467 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6468 status,
6469 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6472 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6473 u8 link_type, u8 addr_type, u32 passkey,
6474 u8 entered)
6476 struct mgmt_ev_passkey_notify ev;
6478 BT_DBG("%s", hdev->name);
6480 bacpy(&ev.addr.bdaddr, bdaddr);
6481 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6482 ev.passkey = __cpu_to_le32(passkey);
6483 ev.entered = entered;
6485 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6488 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6490 struct mgmt_ev_auth_failed ev;
6491 struct pending_cmd *cmd;
6492 u8 status = mgmt_status(hci_status);
6494 bacpy(&ev.addr.bdaddr, &conn->dst);
6495 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6496 ev.status = status;
6498 cmd = find_pairing(conn);
6500 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6501 cmd ? cmd->sk : NULL);
6503 if (cmd)
6504 pairing_complete(cmd, status);
6507 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6509 struct cmd_lookup match = { NULL, hdev };
6510 bool changed;
6512 if (status) {
6513 u8 mgmt_err = mgmt_status(status);
6514 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6515 cmd_status_rsp, &mgmt_err);
6516 return;
6519 if (test_bit(HCI_AUTH, &hdev->flags))
6520 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6521 &hdev->dev_flags);
6522 else
6523 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6524 &hdev->dev_flags);
6526 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6527 &match);
6529 if (changed)
6530 new_settings(hdev, match.sk);
6532 if (match.sk)
6533 sock_put(match.sk);
6536 static void clear_eir(struct hci_request *req)
6538 struct hci_dev *hdev = req->hdev;
6539 struct hci_cp_write_eir cp;
6541 if (!lmp_ext_inq_capable(hdev))
6542 return;
6544 memset(hdev->eir, 0, sizeof(hdev->eir));
6546 memset(&cp, 0, sizeof(cp));
6548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6551 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6553 struct cmd_lookup match = { NULL, hdev };
6554 struct hci_request req;
6555 bool changed = false;
6557 if (status) {
6558 u8 mgmt_err = mgmt_status(status);
6560 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6561 &hdev->dev_flags)) {
6562 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6563 new_settings(hdev, NULL);
6566 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6567 &mgmt_err);
6568 return;
6571 if (enable) {
6572 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6573 } else {
6574 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6575 if (!changed)
6576 changed = test_and_clear_bit(HCI_HS_ENABLED,
6577 &hdev->dev_flags);
6578 else
6579 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6582 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6584 if (changed)
6585 new_settings(hdev, match.sk);
6587 if (match.sk)
6588 sock_put(match.sk);
6590 hci_req_init(&req, hdev);
6592 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6593 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6594 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6595 sizeof(enable), &enable);
6596 update_eir(&req);
6597 } else {
6598 clear_eir(&req);
6601 hci_req_run(&req, NULL);
6604 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6606 struct cmd_lookup match = { NULL, hdev };
6607 bool changed = false;
6609 if (status) {
6610 u8 mgmt_err = mgmt_status(status);
6612 if (enable) {
6613 if (test_and_clear_bit(HCI_SC_ENABLED,
6614 &hdev->dev_flags))
6615 new_settings(hdev, NULL);
6616 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6619 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6620 cmd_status_rsp, &mgmt_err);
6621 return;
6624 if (enable) {
6625 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6626 } else {
6627 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6628 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6631 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6632 settings_rsp, &match);
6634 if (changed)
6635 new_settings(hdev, match.sk);
6637 if (match.sk)
6638 sock_put(match.sk);
6641 static void sk_lookup(struct pending_cmd *cmd, void *data)
6643 struct cmd_lookup *match = data;
6645 if (match->sk == NULL) {
6646 match->sk = cmd->sk;
6647 sock_hold(match->sk);
6651 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6652 u8 status)
6654 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6656 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6657 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6658 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6660 if (!status)
6661 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6662 NULL);
6664 if (match.sk)
6665 sock_put(match.sk);
6668 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6670 struct mgmt_cp_set_local_name ev;
6671 struct pending_cmd *cmd;
6673 if (status)
6674 return;
6676 memset(&ev, 0, sizeof(ev));
6677 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6678 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6680 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6681 if (!cmd) {
6682 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6684 /* If this is a HCI command related to powering on the
6685 * HCI dev don't send any mgmt signals.
6687 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6688 return;
6691 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6692 cmd ? cmd->sk : NULL);
6695 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6696 u8 *randomizer192, u8 *hash256,
6697 u8 *randomizer256, u8 status)
6699 struct pending_cmd *cmd;
6701 BT_DBG("%s status %u", hdev->name, status);
6703 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6704 if (!cmd)
6705 return;
6707 if (status) {
6708 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6709 mgmt_status(status));
6710 } else {
6711 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6712 hash256 && randomizer256) {
6713 struct mgmt_rp_read_local_oob_ext_data rp;
6715 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6716 memcpy(rp.randomizer192, randomizer192,
6717 sizeof(rp.randomizer192));
6719 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6720 memcpy(rp.randomizer256, randomizer256,
6721 sizeof(rp.randomizer256));
6723 cmd_complete(cmd->sk, hdev->id,
6724 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6725 &rp, sizeof(rp));
6726 } else {
6727 struct mgmt_rp_read_local_oob_data rp;
6729 memcpy(rp.hash, hash192, sizeof(rp.hash));
6730 memcpy(rp.randomizer, randomizer192,
6731 sizeof(rp.randomizer));
6733 cmd_complete(cmd->sk, hdev->id,
6734 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6735 &rp, sizeof(rp));
6739 mgmt_pending_remove(cmd);
6742 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6743 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6744 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6746 char buf[512];
6747 struct mgmt_ev_device_found *ev = (void *) buf;
6748 size_t ev_size;
6750 /* Don't send events for a non-kernel initiated discovery. With
6751 * LE one exception is if we have pend_le_reports > 0 in which
6752 * case we're doing passive scanning and want these events.
6754 if (!hci_discovery_active(hdev)) {
6755 if (link_type == ACL_LINK)
6756 return;
6757 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6758 return;
6761 /* Make sure that the buffer is big enough. The 5 extra bytes
6762 * are for the potential CoD field.
6764 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6765 return;
6767 memset(buf, 0, sizeof(buf));
6769 bacpy(&ev->addr.bdaddr, bdaddr);
6770 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6771 ev->rssi = rssi;
6772 ev->flags = cpu_to_le32(flags);
6774 if (eir_len > 0)
6775 memcpy(ev->eir, eir, eir_len);
6777 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6778 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6779 dev_class, 3);
6781 if (scan_rsp_len > 0)
6782 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6784 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6785 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6787 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
6790 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6791 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6793 struct mgmt_ev_device_found *ev;
6794 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6795 u16 eir_len;
6797 ev = (struct mgmt_ev_device_found *) buf;
6799 memset(buf, 0, sizeof(buf));
6801 bacpy(&ev->addr.bdaddr, bdaddr);
6802 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6803 ev->rssi = rssi;
6805 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6806 name_len);
6808 ev->eir_len = cpu_to_le16(eir_len);
6810 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6813 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6815 struct mgmt_ev_discovering ev;
6816 struct pending_cmd *cmd;
6818 BT_DBG("%s discovering %u", hdev->name, discovering);
6820 if (discovering)
6821 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6822 else
6823 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6825 if (cmd != NULL) {
6826 u8 type = hdev->discovery.type;
6828 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6829 sizeof(type));
6830 mgmt_pending_remove(cmd);
6833 memset(&ev, 0, sizeof(ev));
6834 ev.type = hdev->discovery.type;
6835 ev.discovering = discovering;
6837 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6840 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6842 BT_DBG("%s status %u", hdev->name, status);
6845 void mgmt_reenable_advertising(struct hci_dev *hdev)
6847 struct hci_request req;
6849 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6850 return;
6852 hci_req_init(&req, hdev);
6853 enable_advertising(&req);
6854 hci_req_run(&req, adv_enable_complete);