mm/zsmalloc: allocate exactly size of struct zs_pool
[linux/fpc-iii.git] / net / bluetooth / mgmt.c
blob7384f11613369b0997df0229ecc6d8ea2c80bb5b
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "smp.h"
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 8
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_READ_INFO,
43 MGMT_OP_SET_POWERED,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_BONDABLE,
48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS,
51 MGMT_OP_SET_LE,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_ADD_UUID,
55 MGMT_OP_REMOVE_UUID,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_DISCONNECT,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_PAIR_DEVICE,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_CONFIRM_NAME,
76 MGMT_OP_BLOCK_DEVICE,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_BREDR,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
85 MGMT_OP_SET_PRIVACY,
86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
89 MGMT_OP_ADD_DEVICE,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
96 MGMT_OP_START_SERVICE_DISCOVERY,
99 static const u16 mgmt_events[] = {
100 MGMT_EV_CONTROLLER_ERROR,
101 MGMT_EV_INDEX_ADDED,
102 MGMT_EV_INDEX_REMOVED,
103 MGMT_EV_NEW_SETTINGS,
104 MGMT_EV_CLASS_OF_DEV_CHANGED,
105 MGMT_EV_LOCAL_NAME_CHANGED,
106 MGMT_EV_NEW_LINK_KEY,
107 MGMT_EV_NEW_LONG_TERM_KEY,
108 MGMT_EV_DEVICE_CONNECTED,
109 MGMT_EV_DEVICE_DISCONNECTED,
110 MGMT_EV_CONNECT_FAILED,
111 MGMT_EV_PIN_CODE_REQUEST,
112 MGMT_EV_USER_CONFIRM_REQUEST,
113 MGMT_EV_USER_PASSKEY_REQUEST,
114 MGMT_EV_AUTH_FAILED,
115 MGMT_EV_DEVICE_FOUND,
116 MGMT_EV_DISCOVERING,
117 MGMT_EV_DEVICE_BLOCKED,
118 MGMT_EV_DEVICE_UNBLOCKED,
119 MGMT_EV_DEVICE_UNPAIRED,
120 MGMT_EV_PASSKEY_NOTIFY,
121 MGMT_EV_NEW_IRK,
122 MGMT_EV_NEW_CSRK,
123 MGMT_EV_DEVICE_ADDED,
124 MGMT_EV_DEVICE_REMOVED,
125 MGMT_EV_NEW_CONN_PARAM,
126 MGMT_EV_UNCONF_INDEX_ADDED,
127 MGMT_EV_UNCONF_INDEX_REMOVED,
128 MGMT_EV_NEW_CONFIG_OPTIONS,
131 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
133 struct pending_cmd {
134 struct list_head list;
135 u16 opcode;
136 int index;
137 void *param;
138 size_t param_len;
139 struct sock *sk;
140 void *user_data;
141 void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
144 /* HCI to MGMT error code conversion table */
145 static u8 mgmt_status_table[] = {
146 MGMT_STATUS_SUCCESS,
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
220 struct sk_buff *skb;
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 if (!skb)
225 return -ENOMEM;
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
229 if (hdev)
230 hdr->index = cpu_to_le16(hdev->id);
231 else
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
235 if (data)
236 memcpy(skb_put(skb, data_len), data, data_len);
238 /* Time stamp */
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
242 kfree_skb(skb);
244 return 0;
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
249 struct sk_buff *skb;
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
252 int err;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257 if (!skb)
258 return -ENOMEM;
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
267 ev->status = status;
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
271 if (err < 0)
272 kfree_skb(skb);
274 return err;
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
280 struct sk_buff *skb;
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
283 int err;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
288 if (!skb)
289 return -ENOMEM;
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
299 ev->status = status;
301 if (rp)
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
305 if (err < 0)
306 kfree_skb(skb);
308 return err;
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326 u16 data_len)
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
331 __le16 *opcode;
332 size_t rp_size;
333 int i, err;
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
340 if (!rp)
341 return -ENOMEM;
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353 rp_size);
354 kfree(rp);
356 return err;
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
360 u16 data_len)
362 struct mgmt_rp_read_index_list *rp;
363 struct hci_dev *d;
364 size_t rp_len;
365 u16 count;
366 int err;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
372 count = 0;
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
376 count++;
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
381 if (!rp) {
382 read_unlock(&hci_dev_list_lock);
383 return -ENOMEM;
386 count = 0;
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
391 continue;
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
397 continue;
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
412 rp_len);
414 kfree(rp);
416 return err;
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
423 struct hci_dev *d;
424 size_t rp_len;
425 u16 count;
426 int err;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
432 count = 0;
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 count++;
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
441 if (!rp) {
442 read_unlock(&hci_dev_list_lock);
443 return -ENOMEM;
446 count = 0;
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
451 continue;
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 continue;
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
472 0, rp, rp_len);
474 kfree(rp);
476 return err;
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
489 return true;
492 static __le32 get_missing_options(struct hci_dev *hdev)
494 u32 options = 0;
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
527 u32 options = 0;
529 BT_DBG("sock %p %s", sk, hdev->name);
531 hci_dev_lock(hdev);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 sizeof(rp));
551 static u32 get_supported_settings(struct hci_dev *hdev)
553 u32 settings = 0;
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_BONDABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_SECURE_CONN;
581 settings |= MGMT_SETTING_PRIVACY;
584 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 hdev->set_bdaddr)
586 settings |= MGMT_SETTING_CONFIGURATION;
588 return settings;
591 static u32 get_current_settings(struct hci_dev *hdev)
593 u32 settings = 0;
595 if (hdev_is_powered(hdev))
596 settings |= MGMT_SETTING_POWERED;
598 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
599 settings |= MGMT_SETTING_CONNECTABLE;
601 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
602 settings |= MGMT_SETTING_FAST_CONNECTABLE;
604 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
605 settings |= MGMT_SETTING_DISCOVERABLE;
607 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
608 settings |= MGMT_SETTING_BONDABLE;
610 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
611 settings |= MGMT_SETTING_BREDR;
613 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
614 settings |= MGMT_SETTING_LE;
616 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
617 settings |= MGMT_SETTING_LINK_SECURITY;
619 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
620 settings |= MGMT_SETTING_SSP;
622 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
623 settings |= MGMT_SETTING_HS;
625 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
626 settings |= MGMT_SETTING_ADVERTISING;
628 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
629 settings |= MGMT_SETTING_SECURE_CONN;
631 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
632 settings |= MGMT_SETTING_DEBUG_KEYS;
634 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
635 settings |= MGMT_SETTING_PRIVACY;
637 return settings;
640 #define PNP_INFO_SVCLASS_ID 0x1200
642 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
644 u8 *ptr = data, *uuids_start = NULL;
645 struct bt_uuid *uuid;
647 if (len < 4)
648 return ptr;
650 list_for_each_entry(uuid, &hdev->uuids, list) {
651 u16 uuid16;
653 if (uuid->size != 16)
654 continue;
656 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
657 if (uuid16 < 0x1100)
658 continue;
660 if (uuid16 == PNP_INFO_SVCLASS_ID)
661 continue;
663 if (!uuids_start) {
664 uuids_start = ptr;
665 uuids_start[0] = 1;
666 uuids_start[1] = EIR_UUID16_ALL;
667 ptr += 2;
670 /* Stop if not enough space to put next UUID */
671 if ((ptr - data) + sizeof(u16) > len) {
672 uuids_start[1] = EIR_UUID16_SOME;
673 break;
676 *ptr++ = (uuid16 & 0x00ff);
677 *ptr++ = (uuid16 & 0xff00) >> 8;
678 uuids_start[0] += sizeof(uuid16);
681 return ptr;
684 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
686 u8 *ptr = data, *uuids_start = NULL;
687 struct bt_uuid *uuid;
689 if (len < 6)
690 return ptr;
692 list_for_each_entry(uuid, &hdev->uuids, list) {
693 if (uuid->size != 32)
694 continue;
696 if (!uuids_start) {
697 uuids_start = ptr;
698 uuids_start[0] = 1;
699 uuids_start[1] = EIR_UUID32_ALL;
700 ptr += 2;
703 /* Stop if not enough space to put next UUID */
704 if ((ptr - data) + sizeof(u32) > len) {
705 uuids_start[1] = EIR_UUID32_SOME;
706 break;
709 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
710 ptr += sizeof(u32);
711 uuids_start[0] += sizeof(u32);
714 return ptr;
717 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
719 u8 *ptr = data, *uuids_start = NULL;
720 struct bt_uuid *uuid;
722 if (len < 18)
723 return ptr;
725 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 128)
727 continue;
729 if (!uuids_start) {
730 uuids_start = ptr;
731 uuids_start[0] = 1;
732 uuids_start[1] = EIR_UUID128_ALL;
733 ptr += 2;
736 /* Stop if not enough space to put next UUID */
737 if ((ptr - data) + 16 > len) {
738 uuids_start[1] = EIR_UUID128_SOME;
739 break;
742 memcpy(ptr, uuid->uuid, 16);
743 ptr += 16;
744 uuids_start[0] += 16;
747 return ptr;
750 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
752 struct pending_cmd *cmd;
754 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
755 if (cmd->opcode == opcode)
756 return cmd;
759 return NULL;
762 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
763 struct hci_dev *hdev,
764 const void *data)
766 struct pending_cmd *cmd;
768 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
769 if (cmd->user_data != data)
770 continue;
771 if (cmd->opcode == opcode)
772 return cmd;
775 return NULL;
778 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
780 u8 ad_len = 0;
781 size_t name_len;
783 name_len = strlen(hdev->dev_name);
784 if (name_len > 0) {
785 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
787 if (name_len > max_len) {
788 name_len = max_len;
789 ptr[1] = EIR_NAME_SHORT;
790 } else
791 ptr[1] = EIR_NAME_COMPLETE;
793 ptr[0] = name_len + 1;
795 memcpy(ptr + 2, hdev->dev_name, name_len);
797 ad_len += (name_len + 2);
798 ptr += (name_len + 2);
801 return ad_len;
804 static void update_scan_rsp_data(struct hci_request *req)
806 struct hci_dev *hdev = req->hdev;
807 struct hci_cp_le_set_scan_rsp_data cp;
808 u8 len;
810 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
811 return;
813 memset(&cp, 0, sizeof(cp));
815 len = create_scan_rsp_data(hdev, cp.data);
817 if (hdev->scan_rsp_data_len == len &&
818 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
819 return;
821 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
822 hdev->scan_rsp_data_len = len;
824 cp.length = len;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
829 static u8 get_adv_discov_flags(struct hci_dev *hdev)
831 struct pending_cmd *cmd;
833 /* If there's a pending mgmt command the flags will not yet have
834 * their final values, so check for this first.
836 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 if (cmd) {
838 struct mgmt_mode *cp = cmd->param;
839 if (cp->val == 0x01)
840 return LE_AD_GENERAL;
841 else if (cp->val == 0x02)
842 return LE_AD_LIMITED;
843 } else {
844 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
845 return LE_AD_LIMITED;
846 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
847 return LE_AD_GENERAL;
850 return 0;
853 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
855 u8 ad_len = 0, flags = 0;
857 flags |= get_adv_discov_flags(hdev);
859 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
860 flags |= LE_AD_NO_BREDR;
862 if (flags) {
863 BT_DBG("adv flags 0x%02x", flags);
865 ptr[0] = 2;
866 ptr[1] = EIR_FLAGS;
867 ptr[2] = flags;
869 ad_len += 3;
870 ptr += 3;
873 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 ptr[0] = 2;
875 ptr[1] = EIR_TX_POWER;
876 ptr[2] = (u8) hdev->adv_tx_power;
878 ad_len += 3;
879 ptr += 3;
882 return ad_len;
885 static void update_adv_data(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
888 struct hci_cp_le_set_adv_data cp;
889 u8 len;
891 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
892 return;
894 memset(&cp, 0, sizeof(cp));
896 len = create_adv_data(hdev, cp.data);
898 if (hdev->adv_data_len == len &&
899 memcmp(cp.data, hdev->adv_data, len) == 0)
900 return;
902 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
903 hdev->adv_data_len = len;
905 cp.length = len;
907 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
910 int mgmt_update_adv_data(struct hci_dev *hdev)
912 struct hci_request req;
914 hci_req_init(&req, hdev);
915 update_adv_data(&req);
917 return hci_req_run(&req, NULL);
920 static void create_eir(struct hci_dev *hdev, u8 *data)
922 u8 *ptr = data;
923 size_t name_len;
925 name_len = strlen(hdev->dev_name);
927 if (name_len > 0) {
928 /* EIR Data type */
929 if (name_len > 48) {
930 name_len = 48;
931 ptr[1] = EIR_NAME_SHORT;
932 } else
933 ptr[1] = EIR_NAME_COMPLETE;
935 /* EIR Data length */
936 ptr[0] = name_len + 1;
938 memcpy(ptr + 2, hdev->dev_name, name_len);
940 ptr += (name_len + 2);
943 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
944 ptr[0] = 2;
945 ptr[1] = EIR_TX_POWER;
946 ptr[2] = (u8) hdev->inq_tx_power;
948 ptr += 3;
951 if (hdev->devid_source > 0) {
952 ptr[0] = 9;
953 ptr[1] = EIR_DEVICE_ID;
955 put_unaligned_le16(hdev->devid_source, ptr + 2);
956 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
957 put_unaligned_le16(hdev->devid_product, ptr + 6);
958 put_unaligned_le16(hdev->devid_version, ptr + 8);
960 ptr += 10;
963 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
964 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
965 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
968 static void update_eir(struct hci_request *req)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_cp_write_eir cp;
973 if (!hdev_is_powered(hdev))
974 return;
976 if (!lmp_ext_inq_capable(hdev))
977 return;
979 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
980 return;
982 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
983 return;
985 memset(&cp, 0, sizeof(cp));
987 create_eir(hdev, cp.data);
989 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
990 return;
992 memcpy(hdev->eir, cp.data, sizeof(cp.data));
994 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
997 static u8 get_service_classes(struct hci_dev *hdev)
999 struct bt_uuid *uuid;
1000 u8 val = 0;
1002 list_for_each_entry(uuid, &hdev->uuids, list)
1003 val |= uuid->svc_hint;
1005 return val;
1008 static void update_class(struct hci_request *req)
1010 struct hci_dev *hdev = req->hdev;
1011 u8 cod[3];
1013 BT_DBG("%s", hdev->name);
1015 if (!hdev_is_powered(hdev))
1016 return;
1018 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1019 return;
1021 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1022 return;
1024 cod[0] = hdev->minor_class;
1025 cod[1] = hdev->major_class;
1026 cod[2] = get_service_classes(hdev);
1028 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1029 cod[1] |= 0x20;
1031 if (memcmp(cod, hdev->dev_class, 3) == 0)
1032 return;
1034 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1037 static bool get_connectable(struct hci_dev *hdev)
1039 struct pending_cmd *cmd;
1041 /* If there's a pending mgmt command the flag will not yet have
1042 * it's final value, so check for this first.
1044 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1045 if (cmd) {
1046 struct mgmt_mode *cp = cmd->param;
1047 return cp->val;
1050 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1053 static void disable_advertising(struct hci_request *req)
1055 u8 enable = 0x00;
1057 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1060 static void enable_advertising(struct hci_request *req)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_param cp;
1064 u8 own_addr_type, enable = 0x01;
1065 bool connectable;
1067 if (hci_conn_num(hdev, LE_LINK) > 0)
1068 return;
1070 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1071 disable_advertising(req);
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1080 connectable = get_connectable(hdev);
1082 /* Set require_privacy to true only when non-connectable
1083 * advertising is used. In that case it is fine to use a
1084 * non-resolvable private address.
1086 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1087 return;
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1093 cp.own_address_type = own_addr_type;
1094 cp.channel_map = hdev->le_adv_channel_map;
1096 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1101 static void service_cache_off(struct work_struct *work)
1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
1104 service_cache.work);
1105 struct hci_request req;
1107 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1108 return;
1110 hci_req_init(&req, hdev);
1112 hci_dev_lock(hdev);
1114 update_eir(&req);
1115 update_class(&req);
1117 hci_dev_unlock(hdev);
1119 hci_req_run(&req, NULL);
1122 static void rpa_expired(struct work_struct *work)
1124 struct hci_dev *hdev = container_of(work, struct hci_dev,
1125 rpa_expired.work);
1126 struct hci_request req;
1128 BT_DBG("");
1130 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1132 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1133 return;
1135 /* The generation of a new RPA and programming it into the
1136 * controller happens in the enable_advertising() function.
1138 hci_req_init(&req, hdev);
1139 enable_advertising(&req);
1140 hci_req_run(&req, NULL);
1143 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1145 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1146 return;
1148 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1149 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1151 /* Non-mgmt controlled devices get this bit set
1152 * implicitly so that pairing works for them, however
1153 * for mgmt we require user-space to explicitly enable
1154 * it
1156 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1159 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1160 void *data, u16 data_len)
1162 struct mgmt_rp_read_info rp;
1164 BT_DBG("sock %p %s", sk, hdev->name);
1166 hci_dev_lock(hdev);
1168 memset(&rp, 0, sizeof(rp));
1170 bacpy(&rp.bdaddr, &hdev->bdaddr);
1172 rp.version = hdev->hci_ver;
1173 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1175 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1176 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1178 memcpy(rp.dev_class, hdev->dev_class, 3);
1180 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1181 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1183 hci_dev_unlock(hdev);
1185 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1186 sizeof(rp));
1189 static void mgmt_pending_free(struct pending_cmd *cmd)
1191 sock_put(cmd->sk);
1192 kfree(cmd->param);
1193 kfree(cmd);
1196 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1197 struct hci_dev *hdev, void *data,
1198 u16 len)
1200 struct pending_cmd *cmd;
1202 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1203 if (!cmd)
1204 return NULL;
1206 cmd->opcode = opcode;
1207 cmd->index = hdev->id;
1209 cmd->param = kmemdup(data, len, GFP_KERNEL);
1210 if (!cmd->param) {
1211 kfree(cmd);
1212 return NULL;
1215 cmd->param_len = len;
1217 cmd->sk = sk;
1218 sock_hold(sk);
1220 list_add(&cmd->list, &hdev->mgmt_pending);
1222 return cmd;
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1227 void *data),
1228 void *data)
1230 struct pending_cmd *cmd, *tmp;
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1234 continue;
1236 cb(cmd, data);
1240 static void mgmt_pending_remove(struct pending_cmd *cmd)
1242 list_del(&cmd->list);
1243 mgmt_pending_free(cmd);
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251 sizeof(settings));
1254 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1256 BT_DBG("%s status 0x%02x", hdev->name, status);
1258 if (hci_conn_count(hdev) == 0) {
1259 cancel_delayed_work(&hdev->power_off);
1260 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Append to @req whatever HCI commands are needed to stop the current
 * discovery activity, based on hdev->discovery.state.
 *
 * Returns true if at least one command was queued (the caller should
 * expect the request to do work), false if nothing needed stopping.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			/* BR/EDR inquiry in progress */
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan in progress */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* A remote name request may be outstanding; cancel it */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
/* Build and run an HCI request that quiesces the controller before
 * powering off: disable page/inquiry scan, stop advertising, stop any
 * ongoing discovery, and disconnect/cancel/reject every connection.
 *
 * Returns the hci_req_run() result; -ENODATA means no commands were
 * queued (nothing to clean up — see the caller in set_powered()).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect it */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting acceptance: reject */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1366 u16 len)
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1370 int err;
1372 BT_DBG("request for %s", hdev->name);
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1378 hci_dev_lock(hdev);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1382 MGMT_STATUS_BUSY);
1383 goto failed;
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1389 if (cp->val) {
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1391 data, len);
1392 err = mgmt_powered(hdev, 1);
1393 goto failed;
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 goto failed;
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1403 if (!cmd) {
1404 err = -ENOMEM;
1405 goto failed;
1408 if (cp->val) {
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1410 err = 0;
1411 } else {
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1414 if (!err)
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1422 err = 0;
1426 failed:
1427 hci_dev_unlock(hdev);
1428 return err;
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1433 __le32 ev;
1435 ev = cpu_to_le32(get_current_settings(hdev));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* Broadcast a New Settings event to all mgmt sockets (nobody skipped). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
/* Context passed through mgmt_pending_foreach() to settings_rsp():
 * collects the first responder's socket (with a held reference) so the
 * caller can skip it when broadcasting the resulting New Settings event.
 */
struct cmd_lookup {
	struct sock *sk;	/* set by settings_rsp(); reference held */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, stash the first socket seen in the cmd_lookup match (taking
 * a reference), then unlink and free the command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; mgmt_pending_free() drops cmd->sk */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
/* mgmt_pending_foreach() callback: respond to @cmd with the status
 * byte pointed to by @data, then remove and free the command.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1475 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1477 if (cmd->cmd_complete) {
1478 u8 *status = data;
1480 cmd->cmd_complete(cmd, *status);
1481 mgmt_pending_remove(cmd);
1483 return;
1486 cmd_status_rsp(cmd, data);
/* cmd_complete handler that echoes the command's full stored
 * parameters back as the response payload.
 */
static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     cmd->param_len);
}
/* cmd_complete handler that returns only the leading mgmt_addr_info
 * portion of the stored parameters (the address + type the command
 * referred to).
 */
static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     sizeof(struct mgmt_addr_info));
}
1501 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1503 if (!lmp_bredr_capable(hdev))
1504 return MGMT_STATUS_NOT_SUPPORTED;
1505 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1506 return MGMT_STATUS_REJECTED;
1507 else
1508 return MGMT_STATUS_SUCCESS;
1511 static u8 mgmt_le_support(struct hci_dev *hdev)
1513 if (!lmp_le_capable(hdev))
1514 return MGMT_STATUS_NOT_SUPPORTED;
1515 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1516 return MGMT_STATUS_REJECTED;
1517 else
1518 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for MGMT_OP_SET_DISCOVERABLE: commit
 * the flag change, answer the pending command, arm the discoverable
 * timeout, and refresh class-of-device and page-scan state.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* HCI failure: report it and undo the limited flag set
		 * optimistically in set_discoverable() */
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout stored earlier by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable. Disabling requires timeout == 0; limited mode requires
 * a non-zero timeout. When powered off, only the HCI_DISCOVERABLE flag
 * is toggled; otherwise the IAC LAP list, scan enable and advertising
 * data are updated through an HCI request, and the response is sent
 * from set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to already be enabled */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1745 static void write_fast_connectable(struct hci_request *req, bool enable)
1747 struct hci_dev *hdev = req->hdev;
1748 struct hci_cp_write_page_scan_activity acp;
1749 u8 type;
1751 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1752 return;
1754 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1755 return;
1757 if (enable) {
1758 type = PAGE_SCAN_TYPE_INTERLACED;
1760 /* 160 msec page scan interval */
1761 acp.interval = cpu_to_le16(0x0100);
1762 } else {
1763 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1765 /* default 1.28 sec page scan */
1766 acp.interval = cpu_to_le16(0x0800);
1769 acp.window = cpu_to_le16(0x0012);
1771 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1772 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1773 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1774 sizeof(acp), &acp);
1776 if (hdev->page_scan_type != type)
1777 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE: commit
 * flag changes (disabling connectable also clears discoverable),
 * answer the pending command, and refresh page/background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Turning connectable off also drops discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev, NULL);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1829 static int set_connectable_update_settings(struct hci_dev *hdev,
1830 struct sock *sk, u8 val)
1832 bool changed = false;
1833 int err;
1835 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1836 changed = true;
1838 if (val) {
1839 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1840 } else {
1841 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1842 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1845 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1846 if (err < 0)
1847 return err;
1849 if (changed) {
1850 hci_update_page_scan(hdev, NULL);
1851 hci_update_background_scan(hdev);
1852 return new_settings(hdev, sk);
1855 return 0;
/* MGMT_OP_SET_CONNECTABLE handler.
 *
 * When powered off only the flags are updated; otherwise an HCI
 * request adjusts scan enable (honouring whitelist entries when
 * disabling), fast-connectable page-scan parameters, and advertising.
 * The response is sent from set_connectable_complete(), or directly via
 * set_connectable_update_settings() when the request queued nothing.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* Powered off: flag update only, no HCI traffic */
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1963 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1964 u16 len)
1966 struct mgmt_mode *cp = data;
1967 bool changed;
1968 int err;
1970 BT_DBG("request for %s", hdev->name);
1972 if (cp->val != 0x00 && cp->val != 0x01)
1973 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1974 MGMT_STATUS_INVALID_PARAMS);
1976 hci_dev_lock(hdev);
1978 if (cp->val)
1979 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1980 else
1981 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1983 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1984 if (err < 0)
1985 goto unlock;
1987 if (changed)
1988 err = new_settings(hdev, sk);
1990 unlock:
1991 hci_dev_unlock(hdev);
1992 return err;
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication enable).
 *
 * When powered off only the HCI_LINK_SECURITY flag is toggled;
 * otherwise HCI Write Auth Enable is sent and the response is deferred
 * to the pending-command completion path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing).
 *
 * When powered off only HCI_SSP_ENABLED is toggled (disabling SSP also
 * drops HCI_HS_ENABLED, since High Speed depends on SSP); otherwise
 * HCI Write SSP Mode is sent and the response is deferred to the
 * pending-command completion path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* Disabling SSP also disables High Speed */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug-key mode if it was on */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* MGMT_OP_SET_HS handler (High Speed / AMP).
 *
 * Requires SSP to be enabled. Pure flag toggle — no HCI traffic —
 * except that disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion handler for MGMT_OP_SET_LE: respond to all
 * pending Set LE commands, broadcast New Settings (skipping the first
 * responder's socket), and when LE ended up enabled refresh the
 * advertising/scan-response data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the stashed socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
/* MGMT_OP_SET_LE handler (LE host support).
 *
 * Rejected on LE-only controllers (BR/EDR disabled), since they cannot
 * toggle LE off. When powered off, or when the host-LE state already
 * matches, only flags are toggled (disabling LE also clears
 * HCI_ADVERTISING). Otherwise HCI Write LE Host Supported is sent,
 * preceded by disabling advertising when turning LE off, and the
 * response comes from le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also drops the advertising setting */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling LE host support */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2327 /* This is a helper function to test for pending mgmt commands that can
2328 * cause CoD or EIR HCI commands. We can only allow one such pending
2329 * mgmt command at a time since otherwise we cannot easily track what
2330 * the current values are, will be, and based on that calculate if a new
2331 * HCI command needs to be sent and if yes with what value.
2333 static bool pending_eir_or_class(struct hci_dev *hdev)
2335 struct pending_cmd *cmd;
2337 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2338 switch (cmd->opcode) {
2339 case MGMT_OP_ADD_UUID:
2340 case MGMT_OP_REMOVE_UUID:
2341 case MGMT_OP_SET_DEV_CLASS:
2342 case MGMT_OP_SET_POWERED:
2343 return true;
2347 return false;
/* Bluetooth base UUID in little-endian byte order; get_uuid_size()
 * compares the first 12 bytes against this to decide whether a UUID is
 * a shortened (16/32-bit) alias or a full 128-bit value.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2355 static u8 get_uuid_size(const u8 *uuid)
2357 u32 val;
2359 if (memcmp(uuid, bluetooth_base_uuid, 12))
2360 return 128;
2362 val = get_unaligned_le32(&uuid[12]);
2363 if (val > 0xffff)
2364 return 32;
2366 return 16;
/* Shared completion helper for the class/EIR-affecting commands
 * (Add/Remove UUID, Set Device Class): respond to the pending @mgmt_op
 * command with the current class of device and free it.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* HCI request completion handler for MGMT_OP_ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
/* MGMT_OP_ADD_UUID handler: record the UUID on hdev->uuids and refresh
 * class of device and EIR data. If the HCI request queues nothing
 * (-ENODATA) the command completes immediately with the current class;
 * otherwise the response comes from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands needed: reply immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2453 static bool enable_service_cache(struct hci_dev *hdev)
2455 if (!hdev_is_powered(hdev))
2456 return false;
2458 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2459 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2460 CACHE_TIMEOUT);
2461 return true;
2464 return false;
/* HCI request completion handler for MGMT_OP_REMOVE_UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
/* MGMT_OP_REMOVE_UUID handler: remove one UUID — or all of them when
 * the all-zero wildcard UUID is given — and refresh class of device
 * and EIR data. With the wildcard, the service cache may instead be
 * armed and the command completed immediately. -ENODATA from the HCI
 * request also completes immediately with the current class.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		/* Wildcard: drop every registered UUID */
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands needed: reply immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion handler for MGMT_OP_SET_DEV_CLASS. */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2559 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2560 u16 len)
2562 struct mgmt_cp_set_dev_class *cp = data;
2563 struct pending_cmd *cmd;
2564 struct hci_request req;
2565 int err;
2567 BT_DBG("request for %s", hdev->name);
2569 if (!lmp_bredr_capable(hdev))
2570 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2571 MGMT_STATUS_NOT_SUPPORTED);
2573 hci_dev_lock(hdev);
2575 if (pending_eir_or_class(hdev)) {
2576 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2577 MGMT_STATUS_BUSY);
2578 goto unlock;
2581 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2582 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2583 MGMT_STATUS_INVALID_PARAMS);
2584 goto unlock;
2587 hdev->major_class = cp->major;
2588 hdev->minor_class = cp->minor;
2590 if (!hdev_is_powered(hdev)) {
2591 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2592 hdev->dev_class, 3);
2593 goto unlock;
2596 hci_req_init(&req, hdev);
2598 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2599 hci_dev_unlock(hdev);
2600 cancel_delayed_work_sync(&hdev->service_cache);
2601 hci_dev_lock(hdev);
2602 update_eir(&req);
2605 update_class(&req);
2607 err = hci_req_run(&req, set_class_complete);
2608 if (err < 0) {
2609 if (err != -ENODATA)
2610 goto unlock;
2612 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2613 hdev->dev_class, 3);
2614 goto unlock;
2617 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2618 if (!cmd) {
2619 err = -ENOMEM;
2620 goto unlock;
2623 err = 0;
2625 unlock:
2626 hci_dev_unlock(hdev);
2627 return err;
2630 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2631 u16 len)
2633 struct mgmt_cp_load_link_keys *cp = data;
2634 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2635 sizeof(struct mgmt_link_key_info));
2636 u16 key_count, expected_len;
2637 bool changed;
2638 int i;
2640 BT_DBG("request for %s", hdev->name);
2642 if (!lmp_bredr_capable(hdev))
2643 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2644 MGMT_STATUS_NOT_SUPPORTED);
2646 key_count = __le16_to_cpu(cp->key_count);
2647 if (key_count > max_key_count) {
2648 BT_ERR("load_link_keys: too big key_count value %u",
2649 key_count);
2650 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2651 MGMT_STATUS_INVALID_PARAMS);
2654 expected_len = sizeof(*cp) + key_count *
2655 sizeof(struct mgmt_link_key_info);
2656 if (expected_len != len) {
2657 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2658 expected_len, len);
2659 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2660 MGMT_STATUS_INVALID_PARAMS);
2663 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2664 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2665 MGMT_STATUS_INVALID_PARAMS);
2667 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2668 key_count);
2670 for (i = 0; i < key_count; i++) {
2671 struct mgmt_link_key_info *key = &cp->keys[i];
2673 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2674 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2675 MGMT_STATUS_INVALID_PARAMS);
2678 hci_dev_lock(hdev);
2680 hci_link_keys_clear(hdev);
2682 if (cp->debug_keys)
2683 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2684 &hdev->dev_flags);
2685 else
2686 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2687 &hdev->dev_flags);
2689 if (changed)
2690 new_settings(hdev, NULL);
2692 for (i = 0; i < key_count; i++) {
2693 struct mgmt_link_key_info *key = &cp->keys[i];
2695 /* Always ignore debug keys and require a new pairing if
2696 * the user wants to use them.
2698 if (key->type == HCI_LK_DEBUG_COMBINATION)
2699 continue;
2701 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2702 key->type, key->pin_len, NULL);
2705 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2707 hci_dev_unlock(hdev);
2709 return 0;
2712 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2713 u8 addr_type, struct sock *skip_sk)
2715 struct mgmt_ev_device_unpaired ev;
2717 bacpy(&ev.addr.bdaddr, bdaddr);
2718 ev.addr.type = addr_type;
2720 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2721 skip_sk);
2724 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2725 u16 len)
2727 struct mgmt_cp_unpair_device *cp = data;
2728 struct mgmt_rp_unpair_device rp;
2729 struct hci_cp_disconnect dc;
2730 struct pending_cmd *cmd;
2731 struct hci_conn *conn;
2732 int err;
2734 memset(&rp, 0, sizeof(rp));
2735 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2736 rp.addr.type = cp->addr.type;
2738 if (!bdaddr_type_is_valid(cp->addr.type))
2739 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2740 MGMT_STATUS_INVALID_PARAMS,
2741 &rp, sizeof(rp));
2743 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2744 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2745 MGMT_STATUS_INVALID_PARAMS,
2746 &rp, sizeof(rp));
2748 hci_dev_lock(hdev);
2750 if (!hdev_is_powered(hdev)) {
2751 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2752 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2753 goto unlock;
2756 if (cp->addr.type == BDADDR_BREDR) {
2757 /* If disconnection is requested, then look up the
2758 * connection. If the remote device is connected, it
2759 * will be later used to terminate the link.
2761 * Setting it to NULL explicitly will cause no
2762 * termination of the link.
2764 if (cp->disconnect)
2765 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2766 &cp->addr.bdaddr);
2767 else
2768 conn = NULL;
2770 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2771 } else {
2772 u8 addr_type;
2774 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2775 &cp->addr.bdaddr);
2776 if (conn) {
2777 /* Defer clearing up the connection parameters
2778 * until closing to give a chance of keeping
2779 * them if a repairing happens.
2781 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2783 /* If disconnection is not requested, then
2784 * clear the connection variable so that the
2785 * link is not terminated.
2787 if (!cp->disconnect)
2788 conn = NULL;
2791 if (cp->addr.type == BDADDR_LE_PUBLIC)
2792 addr_type = ADDR_LE_DEV_PUBLIC;
2793 else
2794 addr_type = ADDR_LE_DEV_RANDOM;
2796 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2798 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2801 if (err < 0) {
2802 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2803 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2804 goto unlock;
2807 /* If the connection variable is set, then termination of the
2808 * link is requested.
2810 if (!conn) {
2811 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2812 &rp, sizeof(rp));
2813 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2814 goto unlock;
2817 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2818 sizeof(*cp));
2819 if (!cmd) {
2820 err = -ENOMEM;
2821 goto unlock;
2824 cmd->cmd_complete = addr_cmd_complete;
2826 dc.handle = cpu_to_le16(conn->handle);
2827 dc.reason = 0x13; /* Remote User Terminated Connection */
2828 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2829 if (err < 0)
2830 mgmt_pending_remove(cmd);
2832 unlock:
2833 hci_dev_unlock(hdev);
2834 return err;
2837 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2838 u16 len)
2840 struct mgmt_cp_disconnect *cp = data;
2841 struct mgmt_rp_disconnect rp;
2842 struct pending_cmd *cmd;
2843 struct hci_conn *conn;
2844 int err;
2846 BT_DBG("");
2848 memset(&rp, 0, sizeof(rp));
2849 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2850 rp.addr.type = cp->addr.type;
2852 if (!bdaddr_type_is_valid(cp->addr.type))
2853 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2854 MGMT_STATUS_INVALID_PARAMS,
2855 &rp, sizeof(rp));
2857 hci_dev_lock(hdev);
2859 if (!test_bit(HCI_UP, &hdev->flags)) {
2860 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2861 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2862 goto failed;
2865 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2866 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2867 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2868 goto failed;
2871 if (cp->addr.type == BDADDR_BREDR)
2872 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2873 &cp->addr.bdaddr);
2874 else
2875 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2877 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2878 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2879 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2880 goto failed;
2883 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2884 if (!cmd) {
2885 err = -ENOMEM;
2886 goto failed;
2889 cmd->cmd_complete = generic_cmd_complete;
2891 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2892 if (err < 0)
2893 mgmt_pending_remove(cmd);
2895 failed:
2896 hci_dev_unlock(hdev);
2897 return err;
2900 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2902 switch (link_type) {
2903 case LE_LINK:
2904 switch (addr_type) {
2905 case ADDR_LE_DEV_PUBLIC:
2906 return BDADDR_LE_PUBLIC;
2908 default:
2909 /* Fallback to LE Random address type */
2910 return BDADDR_LE_RANDOM;
2913 default:
2914 /* Fallback to BR/EDR type */
2915 return BDADDR_BREDR;
2919 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2920 u16 data_len)
2922 struct mgmt_rp_get_connections *rp;
2923 struct hci_conn *c;
2924 size_t rp_len;
2925 int err;
2926 u16 i;
2928 BT_DBG("");
2930 hci_dev_lock(hdev);
2932 if (!hdev_is_powered(hdev)) {
2933 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2934 MGMT_STATUS_NOT_POWERED);
2935 goto unlock;
2938 i = 0;
2939 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2940 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2941 i++;
2944 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2945 rp = kmalloc(rp_len, GFP_KERNEL);
2946 if (!rp) {
2947 err = -ENOMEM;
2948 goto unlock;
2951 i = 0;
2952 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2953 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2954 continue;
2955 bacpy(&rp->addr[i].bdaddr, &c->dst);
2956 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2957 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2958 continue;
2959 i++;
2962 rp->conn_count = cpu_to_le16(i);
2964 /* Recalculate length in case of filtered SCO connections, etc */
2965 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2967 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2968 rp_len);
2970 kfree(rp);
2972 unlock:
2973 hci_dev_unlock(hdev);
2974 return err;
2977 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2978 struct mgmt_cp_pin_code_neg_reply *cp)
2980 struct pending_cmd *cmd;
2981 int err;
2983 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2984 sizeof(*cp));
2985 if (!cmd)
2986 return -ENOMEM;
2988 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2989 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2990 if (err < 0)
2991 mgmt_pending_remove(cmd);
2993 return err;
2996 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2997 u16 len)
2999 struct hci_conn *conn;
3000 struct mgmt_cp_pin_code_reply *cp = data;
3001 struct hci_cp_pin_code_reply reply;
3002 struct pending_cmd *cmd;
3003 int err;
3005 BT_DBG("");
3007 hci_dev_lock(hdev);
3009 if (!hdev_is_powered(hdev)) {
3010 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3011 MGMT_STATUS_NOT_POWERED);
3012 goto failed;
3015 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3016 if (!conn) {
3017 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3018 MGMT_STATUS_NOT_CONNECTED);
3019 goto failed;
3022 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3023 struct mgmt_cp_pin_code_neg_reply ncp;
3025 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3027 BT_ERR("PIN code is not 16 bytes long");
3029 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3030 if (err >= 0)
3031 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3032 MGMT_STATUS_INVALID_PARAMS);
3034 goto failed;
3037 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3038 if (!cmd) {
3039 err = -ENOMEM;
3040 goto failed;
3043 cmd->cmd_complete = addr_cmd_complete;
3045 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3046 reply.pin_len = cp->pin_len;
3047 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3049 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3050 if (err < 0)
3051 mgmt_pending_remove(cmd);
3053 failed:
3054 hci_dev_unlock(hdev);
3055 return err;
3058 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3059 u16 len)
3061 struct mgmt_cp_set_io_capability *cp = data;
3063 BT_DBG("");
3065 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3066 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3067 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3069 hci_dev_lock(hdev);
3071 hdev->io_capability = cp->io_capability;
3073 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3074 hdev->io_capability);
3076 hci_dev_unlock(hdev);
3078 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3082 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3084 struct hci_dev *hdev = conn->hdev;
3085 struct pending_cmd *cmd;
3087 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3088 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3089 continue;
3091 if (cmd->user_data != conn)
3092 continue;
3094 return cmd;
3097 return NULL;
3100 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3102 struct mgmt_rp_pair_device rp;
3103 struct hci_conn *conn = cmd->user_data;
3105 bacpy(&rp.addr.bdaddr, &conn->dst);
3106 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3108 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3109 &rp, sizeof(rp));
3111 /* So we don't get further callbacks for this connection */
3112 conn->connect_cfm_cb = NULL;
3113 conn->security_cfm_cb = NULL;
3114 conn->disconn_cfm_cb = NULL;
3116 hci_conn_drop(conn);
3117 hci_conn_put(conn);
3119 mgmt_pending_remove(cmd);
3121 /* The device is paired so there is no need to remove
3122 * its connection parameters anymore.
3124 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3127 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3129 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3130 struct pending_cmd *cmd;
3132 cmd = find_pairing(conn);
3133 if (cmd)
3134 cmd->cmd_complete(cmd, status);
3137 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3139 struct pending_cmd *cmd;
3141 BT_DBG("status %u", status);
3143 cmd = find_pairing(conn);
3144 if (!cmd)
3145 BT_DBG("Unable to find a pending command");
3146 else
3147 cmd->cmd_complete(cmd, mgmt_status(status));
3150 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3152 struct pending_cmd *cmd;
3154 BT_DBG("status %u", status);
3156 if (!status)
3157 return;
3159 cmd = find_pairing(conn);
3160 if (!cmd)
3161 BT_DBG("Unable to find a pending command");
3162 else
3163 cmd->cmd_complete(cmd, mgmt_status(status));
3166 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3167 u16 len)
3169 struct mgmt_cp_pair_device *cp = data;
3170 struct mgmt_rp_pair_device rp;
3171 struct pending_cmd *cmd;
3172 u8 sec_level, auth_type;
3173 struct hci_conn *conn;
3174 int err;
3176 BT_DBG("");
3178 memset(&rp, 0, sizeof(rp));
3179 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3180 rp.addr.type = cp->addr.type;
3182 if (!bdaddr_type_is_valid(cp->addr.type))
3183 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3184 MGMT_STATUS_INVALID_PARAMS,
3185 &rp, sizeof(rp));
3187 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3188 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3189 MGMT_STATUS_INVALID_PARAMS,
3190 &rp, sizeof(rp));
3192 hci_dev_lock(hdev);
3194 if (!hdev_is_powered(hdev)) {
3195 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3196 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3197 goto unlock;
3200 sec_level = BT_SECURITY_MEDIUM;
3201 auth_type = HCI_AT_DEDICATED_BONDING;
3203 if (cp->addr.type == BDADDR_BREDR) {
3204 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3205 auth_type);
3206 } else {
3207 u8 addr_type;
3209 /* Convert from L2CAP channel address type to HCI address type
3211 if (cp->addr.type == BDADDR_LE_PUBLIC)
3212 addr_type = ADDR_LE_DEV_PUBLIC;
3213 else
3214 addr_type = ADDR_LE_DEV_RANDOM;
3216 /* When pairing a new device, it is expected to remember
3217 * this device for future connections. Adding the connection
3218 * parameter information ahead of time allows tracking
3219 * of the slave preferred values and will speed up any
3220 * further connection establishment.
3222 * If connection parameters already exist, then they
3223 * will be kept and this function does nothing.
3225 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3227 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3228 sec_level, HCI_LE_CONN_TIMEOUT,
3229 HCI_ROLE_MASTER);
3232 if (IS_ERR(conn)) {
3233 int status;
3235 if (PTR_ERR(conn) == -EBUSY)
3236 status = MGMT_STATUS_BUSY;
3237 else
3238 status = MGMT_STATUS_CONNECT_FAILED;
3240 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3241 status, &rp,
3242 sizeof(rp));
3243 goto unlock;
3246 if (conn->connect_cfm_cb) {
3247 hci_conn_drop(conn);
3248 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3249 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3250 goto unlock;
3253 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3254 if (!cmd) {
3255 err = -ENOMEM;
3256 hci_conn_drop(conn);
3257 goto unlock;
3260 cmd->cmd_complete = pairing_complete;
3262 /* For LE, just connecting isn't a proof that the pairing finished */
3263 if (cp->addr.type == BDADDR_BREDR) {
3264 conn->connect_cfm_cb = pairing_complete_cb;
3265 conn->security_cfm_cb = pairing_complete_cb;
3266 conn->disconn_cfm_cb = pairing_complete_cb;
3267 } else {
3268 conn->connect_cfm_cb = le_pairing_complete_cb;
3269 conn->security_cfm_cb = le_pairing_complete_cb;
3270 conn->disconn_cfm_cb = le_pairing_complete_cb;
3273 conn->io_capability = cp->io_cap;
3274 cmd->user_data = hci_conn_get(conn);
3276 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3277 hci_conn_security(conn, sec_level, auth_type, true))
3278 pairing_complete(cmd, 0);
3280 err = 0;
3282 unlock:
3283 hci_dev_unlock(hdev);
3284 return err;
3287 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3288 u16 len)
3290 struct mgmt_addr_info *addr = data;
3291 struct pending_cmd *cmd;
3292 struct hci_conn *conn;
3293 int err;
3295 BT_DBG("");
3297 hci_dev_lock(hdev);
3299 if (!hdev_is_powered(hdev)) {
3300 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3301 MGMT_STATUS_NOT_POWERED);
3302 goto unlock;
3305 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3306 if (!cmd) {
3307 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3308 MGMT_STATUS_INVALID_PARAMS);
3309 goto unlock;
3312 conn = cmd->user_data;
3314 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3315 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3316 MGMT_STATUS_INVALID_PARAMS);
3317 goto unlock;
3320 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3322 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3323 addr, sizeof(*addr));
3324 unlock:
3325 hci_dev_unlock(hdev);
3326 return err;
3329 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3330 struct mgmt_addr_info *addr, u16 mgmt_op,
3331 u16 hci_op, __le32 passkey)
3333 struct pending_cmd *cmd;
3334 struct hci_conn *conn;
3335 int err;
3337 hci_dev_lock(hdev);
3339 if (!hdev_is_powered(hdev)) {
3340 err = cmd_complete(sk, hdev->id, mgmt_op,
3341 MGMT_STATUS_NOT_POWERED, addr,
3342 sizeof(*addr));
3343 goto done;
3346 if (addr->type == BDADDR_BREDR)
3347 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3348 else
3349 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3351 if (!conn) {
3352 err = cmd_complete(sk, hdev->id, mgmt_op,
3353 MGMT_STATUS_NOT_CONNECTED, addr,
3354 sizeof(*addr));
3355 goto done;
3358 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3359 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3360 if (!err)
3361 err = cmd_complete(sk, hdev->id, mgmt_op,
3362 MGMT_STATUS_SUCCESS, addr,
3363 sizeof(*addr));
3364 else
3365 err = cmd_complete(sk, hdev->id, mgmt_op,
3366 MGMT_STATUS_FAILED, addr,
3367 sizeof(*addr));
3369 goto done;
3372 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3373 if (!cmd) {
3374 err = -ENOMEM;
3375 goto done;
3378 cmd->cmd_complete = addr_cmd_complete;
3380 /* Continue with pairing via HCI */
3381 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3382 struct hci_cp_user_passkey_reply cp;
3384 bacpy(&cp.bdaddr, &addr->bdaddr);
3385 cp.passkey = passkey;
3386 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3387 } else
3388 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3389 &addr->bdaddr);
3391 if (err < 0)
3392 mgmt_pending_remove(cmd);
3394 done:
3395 hci_dev_unlock(hdev);
3396 return err;
3399 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3400 void *data, u16 len)
3402 struct mgmt_cp_pin_code_neg_reply *cp = data;
3404 BT_DBG("");
3406 return user_pairing_resp(sk, hdev, &cp->addr,
3407 MGMT_OP_PIN_CODE_NEG_REPLY,
3408 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3411 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3412 u16 len)
3414 struct mgmt_cp_user_confirm_reply *cp = data;
3416 BT_DBG("");
3418 if (len != sizeof(*cp))
3419 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3420 MGMT_STATUS_INVALID_PARAMS);
3422 return user_pairing_resp(sk, hdev, &cp->addr,
3423 MGMT_OP_USER_CONFIRM_REPLY,
3424 HCI_OP_USER_CONFIRM_REPLY, 0);
3427 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3428 void *data, u16 len)
3430 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3432 BT_DBG("");
3434 return user_pairing_resp(sk, hdev, &cp->addr,
3435 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3436 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3439 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3440 u16 len)
3442 struct mgmt_cp_user_passkey_reply *cp = data;
3444 BT_DBG("");
3446 return user_pairing_resp(sk, hdev, &cp->addr,
3447 MGMT_OP_USER_PASSKEY_REPLY,
3448 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3451 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3452 void *data, u16 len)
3454 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3456 BT_DBG("");
3458 return user_pairing_resp(sk, hdev, &cp->addr,
3459 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3460 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3463 static void update_name(struct hci_request *req)
3465 struct hci_dev *hdev = req->hdev;
3466 struct hci_cp_write_local_name cp;
3468 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3470 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3473 static void set_name_complete(struct hci_dev *hdev, u8 status)
3475 struct mgmt_cp_set_local_name *cp;
3476 struct pending_cmd *cmd;
3478 BT_DBG("status 0x%02x", status);
3480 hci_dev_lock(hdev);
3482 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3483 if (!cmd)
3484 goto unlock;
3486 cp = cmd->param;
3488 if (status)
3489 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3490 mgmt_status(status));
3491 else
3492 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3493 cp, sizeof(*cp));
3495 mgmt_pending_remove(cmd);
3497 unlock:
3498 hci_dev_unlock(hdev);
3501 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3502 u16 len)
3504 struct mgmt_cp_set_local_name *cp = data;
3505 struct pending_cmd *cmd;
3506 struct hci_request req;
3507 int err;
3509 BT_DBG("");
3511 hci_dev_lock(hdev);
3513 /* If the old values are the same as the new ones just return a
3514 * direct command complete event.
3516 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3517 !memcmp(hdev->short_name, cp->short_name,
3518 sizeof(hdev->short_name))) {
3519 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3520 data, len);
3521 goto failed;
3524 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3526 if (!hdev_is_powered(hdev)) {
3527 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3529 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3530 data, len);
3531 if (err < 0)
3532 goto failed;
3534 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3535 sk);
3537 goto failed;
3540 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3541 if (!cmd) {
3542 err = -ENOMEM;
3543 goto failed;
3546 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3548 hci_req_init(&req, hdev);
3550 if (lmp_bredr_capable(hdev)) {
3551 update_name(&req);
3552 update_eir(&req);
3555 /* The name is stored in the scan response data and so
3556 * no need to udpate the advertising data here.
3558 if (lmp_le_capable(hdev))
3559 update_scan_rsp_data(&req);
3561 err = hci_req_run(&req, set_name_complete);
3562 if (err < 0)
3563 mgmt_pending_remove(cmd);
3565 failed:
3566 hci_dev_unlock(hdev);
3567 return err;
3570 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3571 void *data, u16 data_len)
3573 struct pending_cmd *cmd;
3574 int err;
3576 BT_DBG("%s", hdev->name);
3578 hci_dev_lock(hdev);
3580 if (!hdev_is_powered(hdev)) {
3581 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3582 MGMT_STATUS_NOT_POWERED);
3583 goto unlock;
3586 if (!lmp_ssp_capable(hdev)) {
3587 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3588 MGMT_STATUS_NOT_SUPPORTED);
3589 goto unlock;
3592 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3593 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3594 MGMT_STATUS_BUSY);
3595 goto unlock;
3598 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3599 if (!cmd) {
3600 err = -ENOMEM;
3601 goto unlock;
3604 if (bredr_sc_enabled(hdev))
3605 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3606 0, NULL);
3607 else
3608 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3610 if (err < 0)
3611 mgmt_pending_remove(cmd);
3613 unlock:
3614 hci_dev_unlock(hdev);
3615 return err;
3618 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3619 void *data, u16 len)
3621 int err;
3623 BT_DBG("%s ", hdev->name);
3625 hci_dev_lock(hdev);
3627 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3628 struct mgmt_cp_add_remote_oob_data *cp = data;
3629 u8 status;
3631 if (cp->addr.type != BDADDR_BREDR) {
3632 err = cmd_complete(sk, hdev->id,
3633 MGMT_OP_ADD_REMOTE_OOB_DATA,
3634 MGMT_STATUS_INVALID_PARAMS,
3635 &cp->addr, sizeof(cp->addr));
3636 goto unlock;
3639 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3640 cp->addr.type, cp->hash,
3641 cp->rand, NULL, NULL);
3642 if (err < 0)
3643 status = MGMT_STATUS_FAILED;
3644 else
3645 status = MGMT_STATUS_SUCCESS;
3647 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3648 status, &cp->addr, sizeof(cp->addr));
3649 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3650 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3651 u8 *rand192, *hash192;
3652 u8 status;
3654 if (cp->addr.type != BDADDR_BREDR) {
3655 err = cmd_complete(sk, hdev->id,
3656 MGMT_OP_ADD_REMOTE_OOB_DATA,
3657 MGMT_STATUS_INVALID_PARAMS,
3658 &cp->addr, sizeof(cp->addr));
3659 goto unlock;
3662 if (bdaddr_type_is_le(cp->addr.type)) {
3663 rand192 = NULL;
3664 hash192 = NULL;
3665 } else {
3666 rand192 = cp->rand192;
3667 hash192 = cp->hash192;
3670 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3671 cp->addr.type, hash192, rand192,
3672 cp->hash256, cp->rand256);
3673 if (err < 0)
3674 status = MGMT_STATUS_FAILED;
3675 else
3676 status = MGMT_STATUS_SUCCESS;
3678 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3679 status, &cp->addr, sizeof(cp->addr));
3680 } else {
3681 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3682 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3683 MGMT_STATUS_INVALID_PARAMS);
3686 unlock:
3687 hci_dev_unlock(hdev);
3688 return err;
3691 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3692 void *data, u16 len)
3694 struct mgmt_cp_remove_remote_oob_data *cp = data;
3695 u8 status;
3696 int err;
3698 BT_DBG("%s", hdev->name);
3700 if (cp->addr.type != BDADDR_BREDR)
3701 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3702 MGMT_STATUS_INVALID_PARAMS,
3703 &cp->addr, sizeof(cp->addr));
3705 hci_dev_lock(hdev);
3707 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3708 hci_remote_oob_data_clear(hdev);
3709 status = MGMT_STATUS_SUCCESS;
3710 goto done;
3713 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3714 if (err < 0)
3715 status = MGMT_STATUS_INVALID_PARAMS;
3716 else
3717 status = MGMT_STATUS_SUCCESS;
3719 done:
3720 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3721 status, &cp->addr, sizeof(cp->addr));
3723 hci_dev_unlock(hdev);
3724 return err;
3727 static bool trigger_discovery(struct hci_request *req, u8 *status)
3729 struct hci_dev *hdev = req->hdev;
3730 struct hci_cp_le_set_scan_param param_cp;
3731 struct hci_cp_le_set_scan_enable enable_cp;
3732 struct hci_cp_inquiry inq_cp;
3733 /* General inquiry access code (GIAC) */
3734 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3735 u8 own_addr_type;
3736 int err;
3738 switch (hdev->discovery.type) {
3739 case DISCOV_TYPE_BREDR:
3740 *status = mgmt_bredr_support(hdev);
3741 if (*status)
3742 return false;
3744 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3745 *status = MGMT_STATUS_BUSY;
3746 return false;
3749 hci_inquiry_cache_flush(hdev);
3751 memset(&inq_cp, 0, sizeof(inq_cp));
3752 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3753 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3754 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3755 break;
3757 case DISCOV_TYPE_LE:
3758 case DISCOV_TYPE_INTERLEAVED:
3759 *status = mgmt_le_support(hdev);
3760 if (*status)
3761 return false;
3763 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3764 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3765 *status = MGMT_STATUS_NOT_SUPPORTED;
3766 return false;
3769 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3770 /* Don't let discovery abort an outgoing
3771 * connection attempt that's using directed
3772 * advertising.
3774 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3775 BT_CONNECT)) {
3776 *status = MGMT_STATUS_REJECTED;
3777 return false;
3780 disable_advertising(req);
3783 /* If controller is scanning, it means the background scanning
3784 * is running. Thus, we should temporarily stop it in order to
3785 * set the discovery scanning parameters.
3787 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3788 hci_req_add_le_scan_disable(req);
3790 memset(&param_cp, 0, sizeof(param_cp));
3792 /* All active scans will be done with either a resolvable
3793 * private address (when privacy feature has been enabled)
3794 * or unresolvable private address.
3796 err = hci_update_random_address(req, true, &own_addr_type);
3797 if (err < 0) {
3798 *status = MGMT_STATUS_FAILED;
3799 return false;
3802 param_cp.type = LE_SCAN_ACTIVE;
3803 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3804 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3805 param_cp.own_address_type = own_addr_type;
3806 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3807 &param_cp);
3809 memset(&enable_cp, 0, sizeof(enable_cp));
3810 enable_cp.enable = LE_SCAN_ENABLE;
3811 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3812 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3813 &enable_cp);
3814 break;
3816 default:
3817 *status = MGMT_STATUS_INVALID_PARAMS;
3818 return false;
3821 return true;
/* Request-complete callback for Start (Service) Discovery. Replies to
 * the pending mgmt command, updates the discovery state machine and,
 * for LE or interleaved discovery, schedules the delayed work that
 * disables LE scanning when the discovery timeout expires.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* The same HCI request callback serves both Start Discovery and
	 * Start Service Discovery, so look for either pending command.
	 */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates on its own; no LE scan
		 * timeout needs to be armed.
		 */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT Start Discovery command: validate power/discovery
 * state, record the requested discovery type and run the HCI request
 * built by trigger_discovery(). On success the final reply is sent
 * asynchronously from start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3939 static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
3941 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
/* Handle the MGMT Start Service Discovery command. In addition to the
 * Start Discovery checks, this validates the variable-length UUID
 * filter list carried in the command and stores it (plus the RSSI
 * threshold) in hdev->discovery before triggering discovery.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound keeps sizeof(*cp) + uuid_count * 16 within u16,
	 * so the expected_len computation below cannot wrap.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* Each UUID in the filter list is a full 128-bit (16 byte) UUID */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4052 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
4054 struct pending_cmd *cmd;
4056 BT_DBG("status %d", status);
4058 hci_dev_lock(hdev);
4060 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4061 if (cmd) {
4062 cmd->cmd_complete(cmd, mgmt_status(status));
4063 mgmt_pending_remove(cmd);
4066 if (!status)
4067 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4069 hci_dev_unlock(hdev);
/* Handle the MGMT Stop Discovery command. The requested type must
 * match the currently active discovery. When stopping requires HCI
 * commands the reply is deferred to stop_discovery_complete();
 * otherwise (-ENODATA from hci_req_run) the reply is sent here.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT Confirm Name command: userspace tells the kernel
 * whether the name of a discovered device is already known, so the
 * inquiry cache can decide whether remote name resolution is still
 * needed for that entry.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed, drop the
		 * entry from the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4172 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4173 u16 len)
4175 struct mgmt_cp_block_device *cp = data;
4176 u8 status;
4177 int err;
4179 BT_DBG("%s", hdev->name);
4181 if (!bdaddr_type_is_valid(cp->addr.type))
4182 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4183 MGMT_STATUS_INVALID_PARAMS,
4184 &cp->addr, sizeof(cp->addr));
4186 hci_dev_lock(hdev);
4188 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4189 cp->addr.type);
4190 if (err < 0) {
4191 status = MGMT_STATUS_FAILED;
4192 goto done;
4195 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4196 sk);
4197 status = MGMT_STATUS_SUCCESS;
4199 done:
4200 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4201 &cp->addr, sizeof(cp->addr));
4203 hci_dev_unlock(hdev);
4205 return err;
4208 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4209 u16 len)
4211 struct mgmt_cp_unblock_device *cp = data;
4212 u8 status;
4213 int err;
4215 BT_DBG("%s", hdev->name);
4217 if (!bdaddr_type_is_valid(cp->addr.type))
4218 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4219 MGMT_STATUS_INVALID_PARAMS,
4220 &cp->addr, sizeof(cp->addr));
4222 hci_dev_lock(hdev);
4224 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4225 cp->addr.type);
4226 if (err < 0) {
4227 status = MGMT_STATUS_INVALID_PARAMS;
4228 goto done;
4231 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4232 sk);
4233 status = MGMT_STATUS_SUCCESS;
4235 done:
4236 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4237 &cp->addr, sizeof(cp->addr));
4239 hci_dev_unlock(hdev);
4241 return err;
/* Handle the MGMT Set Device ID command: store the DI record values
 * and refresh the EIR data so the new Device ID is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Valid sources: 0x0000 (disabled), 0x0001 (Bluetooth SIG),
	 * 0x0002 (USB Implementer's Forum).
	 */
	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Regenerate EIR so it carries the new Device ID record; no
	 * completion callback is needed.
	 */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
/* Request-complete callback for Set Advertising. On failure all
 * pending Set Advertising commands get an error status; on success the
 * HCI_ADVERTISING setting flag is synced with the actual controller
 * state (HCI_LE_ADV) and New Settings is broadcast.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
/* Handle the MGMT Set Advertising command. Depending on the current
 * state this either just toggles the setting flag (powered off, LE
 * connections active, or active scan in progress) or actually enables/
 * disables advertising on the controller via an HCI request.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another advertising or LE state change is in
	 * flight, since both touch the same controller state.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4384 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4385 void *data, u16 len)
4387 struct mgmt_cp_set_static_address *cp = data;
4388 int err;
4390 BT_DBG("%s", hdev->name);
4392 if (!lmp_le_capable(hdev))
4393 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4394 MGMT_STATUS_NOT_SUPPORTED);
4396 if (hdev_is_powered(hdev))
4397 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4398 MGMT_STATUS_REJECTED);
4400 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4401 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4402 return cmd_status(sk, hdev->id,
4403 MGMT_OP_SET_STATIC_ADDRESS,
4404 MGMT_STATUS_INVALID_PARAMS);
4406 /* Two most significant bits shall be set */
4407 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4408 return cmd_status(sk, hdev->id,
4409 MGMT_OP_SET_STATIC_ADDRESS,
4410 MGMT_STATUS_INVALID_PARAMS);
4413 hci_dev_lock(hdev);
4415 bacpy(&hdev->static_addr, &cp->bdaddr);
4417 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4419 hci_dev_unlock(hdev);
4421 return err;
/* Handle the MGMT Set Scan Parameters command: validate and store the
 * LE scan interval/window and, if background scanning is currently
 * running, restart it so the new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* 0x0004 - 0x4000 is the valid range for both interval and
	 * window per the HCI LE Set Scan Parameters command.
	 */
	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
/* Request-complete callback for Set Fast Connectable: on success sync
 * the HCI_FAST_CONNECTABLE flag with the value the command requested
 * and broadcast New Settings; on failure send an error status back.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending cmd */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT Set Fast Connectable command: write the page scan
 * activity settings for fast connectability. Requires BR/EDR enabled,
 * controller version >= 1.2, powered on and connectable.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just reply with the current
	 * settings, no HCI traffic needed.
	 */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* Request-complete callback for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and return an error
 * status; on success reply with the new settings and broadcast them.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the MGMT Set BR/EDR command on a dual-mode controller. While
 * powered off only the setting flag is toggled; while powered on,
 * enabling BR/EDR triggers an HCI request (page scan + advertising
 * data update) and disabling is rejected outright.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE stays enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also invalidates every setting that
		 * depends on it.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	hci_update_page_scan(hdev, &req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT Set Secure Connections command. val may be 0x00
 * (off), 0x01 (on) or 0x02 (SC Only mode). If the controller cannot
 * act on it right now (powered off, no SC support, BR/EDR disabled)
 * only the setting flags are toggled; otherwise Write Secure
 * Connections Host Support is sent to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) ||
	    (!lmp_sc_capable(hdev) &&
	     !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested state (including SC Only) already active: reply
	 * without touching the controller.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT Set Debug Keys command. val 0x00 disables, 0x01
 * keeps debug keys, 0x02 additionally makes the controller generate
 * them (SSP debug mode). Updates both the keep and use flags and, if
 * needed, sends Write SSP Debug Mode to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	/* Only touch the controller when the use flag changed and SSP
	 * is active; the return value of hci_send_cmd is intentionally
	 * not treated as fatal for the mgmt command.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle the MGMT Set Privacy command: store or clear the local IRK
 * and toggle LE privacy (RPA usage). Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next power on */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4884 static bool irk_is_valid(struct mgmt_irk_info *irk)
4886 switch (irk->addr.type) {
4887 case BDADDR_LE_PUBLIC:
4888 return true;
4890 case BDADDR_LE_RANDOM:
4891 /* Two most significant bits shall be set */
4892 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4893 return false;
4894 return true;
4897 return false;
/* Handle the MGMT Load IRKs command: validate the variable-length IRK
 * list, then replace the entire SMP IRK store with the new entries.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps the expected_len computation below within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything up front: the command is all-or-nothing */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4967 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4969 if (key->master != 0x00 && key->master != 0x01)
4970 return false;
4972 switch (key->addr.type) {
4973 case BDADDR_LE_PUBLIC:
4974 return true;
4976 case BDADDR_LE_RANDOM:
4977 /* Two most significant bits shall be set */
4978 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4979 return false;
4980 return true;
4983 return false;
/* Handle the MGMT Load Long Term Keys command: validate the
 * variable-length key list, then replace the entire SMP LTK store.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps the expected_len computation below within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate everything up front: the command is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here, so P256 debug keys
			 * fall through to default and are skipped (never
			 * added to the store). This looks deliberate —
			 * debug keys from userspace should not be loaded —
			 * but confirm against upstream history before
			 * relying on it.
			 */
		default:
			/* Unknown key types are silently ignored */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5078 static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5080 struct hci_conn *conn = cmd->user_data;
5081 struct mgmt_rp_get_conn_info rp;
5083 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5085 if (status == MGMT_STATUS_SUCCESS) {
5086 rp.rssi = conn->rssi;
5087 rp.tx_power = conn->tx_power;
5088 rp.max_tx_power = conn->max_tx_power;
5089 } else {
5090 rp.rssi = HCI_RSSI_INVALID;
5091 rp.tx_power = HCI_TX_POWER_INVALID;
5092 rp.max_tx_power = HCI_TX_POWER_INVALID;
5095 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
5096 &rp, sizeof(rp));
5098 hci_conn_drop(conn);
5099 hci_conn_put(conn);
/* HCI request callback for the Read RSSI / Read TX Power request pair
 * issued by Get Connection Info. Recovers the connection handle from
 * whichever command was sent last and completes the pending mgmt
 * command for that connection.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Match the pending command by its stored connection pointer */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * existing connection. Cached values are returned if fresh enough;
 * otherwise an HCI request is issued and the reply is deferred to
 * conn_info_cmd_complete() via a pending command.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the response address so error replies echo it too */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Only one refresh may be in flight per connection */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold + reference the connection until the reply is sent;
		 * released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5273 static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
5275 struct hci_conn *conn = cmd->user_data;
5276 struct mgmt_rp_get_clock_info rp;
5277 struct hci_dev *hdev;
5279 memset(&rp, 0, sizeof(rp));
5280 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5282 if (status)
5283 goto complete;
5285 hdev = hci_dev_get(cmd->index);
5286 if (hdev) {
5287 rp.local_clock = cpu_to_le32(hdev->clock);
5288 hci_dev_put(hdev);
5291 if (conn) {
5292 rp.piconet_clock = cpu_to_le32(conn->clock);
5293 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5296 complete:
5297 cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));
5299 if (conn) {
5300 hci_conn_drop(conn);
5301 hci_conn_put(conn);
/* HCI request callback for the Read Clock request issued by
 * get_clock_info(). The "which" field of the last sent command tells
 * whether a piconet (per-connection) or local clock was read, which in
 * turn determines the key used to find the pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		/* Piconet clock: resolve the connection from the handle */
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Pending command was keyed on the hci_conn pointer (or NULL) */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local controller clock
 * and, when a peer address is given, the piconet clock of that BR/EDR
 * connection. The reply is deferred to clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the response address so error replies echo it too */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		/* BDADDR_ANY means only the local clock is requested */
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command reads the local clock (which = 0x00 via memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the reply is sent;
		 * released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5412 static void device_added(struct sock *sk, struct hci_dev *hdev,
5413 bdaddr_t *bdaddr, u8 type, u8 action)
5415 struct mgmt_ev_device_added ev;
5417 bacpy(&ev.addr.bdaddr, bdaddr);
5418 ev.addr.type = type;
5419 ev.action = action;
5421 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE: register a device for automatic
 * connection handling. For BR/EDR the address is added to the page-scan
 * whitelist; for LE a connection-parameter entry with the requested
 * auto-connect policy is created.
 *
 * Actions: 0x00 = background scan for device (LE report),
 *          0x01 = allow incoming connection,
 *          0x02 = auto-connect remote device (LE always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_update_page_scan(hdev, NULL);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5499 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5500 bdaddr_t *bdaddr, u8 type)
5502 struct mgmt_ev_device_removed ev;
5504 bacpy(&ev.addr.bdaddr, bdaddr);
5505 ev.addr.type = type;
5507 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_REMOVE_DEVICE: undo add_device for one address,
 * or - when BDADDR_ANY is given - remove the whole BR/EDR whitelist and
 * every user-added (non-disabled) LE connection-parameter entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			hci_update_page_scan(hdev, NULL);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not created via Add Device, so
		 * refuse to remove them through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* With BDADDR_ANY only address type 0x00 is valid */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev, NULL);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace all stored (disabled) LE
 * connection parameters with the list supplied by user space. Entries
 * with invalid address types or out-of-range parameters are skipped
 * individually rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether the
 * controller's configuration is provided externally. Only valid while
 * the adapter is powered off and the hardware declares the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. A change may move the controller
 * between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state now disagrees with the UNCONFIGURED
	 * flag, the controller must switch index lists.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: configure the public address
 * of a controller that has no valid address of its own. Only valid
 * while powered off and when the driver provides a set_bdaddr hook.
 * Setting the address may complete configuration, in which case the
 * controller is re-registered on the configured index list.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Configuration is now complete: move the controller to
		 * the configured list and power it on for setup.
		 */
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Dispatch table for mgmt commands, indexed by opcode. For fixed-size
 * commands (var_len == false) the payload length must equal data_len;
 * for variable-length commands it must be at least data_len. The entry
 * order must match the MGMT_OP_* opcode numbering exactly.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery,true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
};
/* Entry point for mgmt messages received on an HCI control socket.
 * Validates the header, resolves and sanity-checks the target index,
 * enforces per-opcode index and payload-length rules from the
 * mgmt_handlers table, and dispatches to the opcode handler.
 *
 * Returns msglen on success or a negative errno / the handler's error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length must exactly account for the payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers in setup/config or claimed by a user
		 * channel are not addressable through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the commands
		 * needed to complete their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands must carry one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5991 void mgmt_index_added(struct hci_dev *hdev)
5993 if (hdev->dev_type != HCI_BREDR)
5994 return;
5996 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5997 return;
5999 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6000 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
6001 else
6002 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
6005 void mgmt_index_removed(struct hci_dev *hdev)
6007 u8 status = MGMT_STATUS_INVALID_INDEX;
6009 if (hdev->dev_type != HCI_BREDR)
6010 return;
6012 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6013 return;
6015 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6017 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
6018 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
6019 else
6020 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
6023 /* This function requires the caller holds hdev->lock */
6024 static void restart_le_actions(struct hci_dev *hdev)
6026 struct hci_conn_params *p;
6028 list_for_each_entry(p, &hdev->le_conn_params, list) {
6029 /* Needed for AUTO_OFF case where might not "really"
6030 * have been powered off.
6032 list_del_init(&p->action);
6034 switch (p->auto_connect) {
6035 case HCI_AUTO_CONN_DIRECT:
6036 case HCI_AUTO_CONN_ALWAYS:
6037 list_add(&p->action, &hdev->pend_le_conns);
6038 break;
6039 case HCI_AUTO_CONN_REPORT:
6040 list_add(&p->action, &hdev->pend_le_reports);
6041 break;
6042 default:
6043 break;
6047 hci_update_background_scan(hdev);
/* HCI request callback run when the power-on initialization request
 * built by powered_update_hci() finishes. Re-arms LE auto-connect
 * actions, answers pending Set Powered commands and broadcasts the new
 * settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	/* settings_rsp stashes the requesting socket in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
/* Build and submit the HCI request that brings the controller's state
 * in line with the current mgmt settings after power on: SSP mode, LE
 * host support, advertising data, authentication and BR/EDR basics.
 *
 * Returns the hci_req_run() result; 0 means the request was queued and
 * powered_complete() will be called.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the link-security setting into the controller */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		hci_update_page_scan(hdev, &req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
/* Notify the mgmt layer of a power state change. On power on the
 * settings are synced via powered_update_hci() (deferred path); on
 * power off all pending commands are failed with Not Powered and a
 * zero class-of-device change is broadcast if needed.
 *
 * Returns 0 or the new_settings() result.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If the update request was queued, powered_complete()
		 * will take care of responses and settings events.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);

	/* If the class was non-zero, announce it resetting to zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6165 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6167 struct pending_cmd *cmd;
6168 u8 status;
6170 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6171 if (!cmd)
6172 return;
6174 if (err == -ERFKILL)
6175 status = MGMT_STATUS_RFKILLED;
6176 else
6177 status = MGMT_STATUS_FAILED;
6179 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6181 mgmt_pending_remove(cmd);
/* Timer callback for the discoverable timeout: drop the discoverable
 * flags, restore page-scan-only visibility on BR/EDR, refresh class and
 * advertising data, and announce the settings change.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan, drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}

	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6215 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6216 bool persistent)
6218 struct mgmt_ev_new_link_key ev;
6220 memset(&ev, 0, sizeof(ev));
6222 ev.store_hint = persistent;
6223 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6224 ev.key.addr.type = BDADDR_BREDR;
6225 ev.key.type = key->type;
6226 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6227 ev.key.pin_len = key->pin_len;
6229 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6232 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6234 switch (ltk->type) {
6235 case SMP_LTK:
6236 case SMP_LTK_SLAVE:
6237 if (ltk->authenticated)
6238 return MGMT_LTK_AUTHENTICATED;
6239 return MGMT_LTK_UNAUTHENTICATED;
6240 case SMP_LTK_P256:
6241 if (ltk->authenticated)
6242 return MGMT_LTK_P256_AUTH;
6243 return MGMT_LTK_P256_UNAUTH;
6244 case SMP_LTK_P256_DEBUG:
6245 return MGMT_LTK_P256_DEBUG;
6248 return MGMT_LTK_UNAUTHENTICATED;
/* Broadcast a MGMT_EV_NEW_LONG_TERM_KEY event so user space can decide
 * whether to persist the key. The store hint is suppressed for keys
 * tied to non-identity (resolvable/non-resolvable) random addresses.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the master (initiator) role key */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
/* Notify userspace of a new identity resolving key via MGMT_EV_NEW_IRK. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
/* Notify userspace of a new signature resolving key via MGMT_EV_NEW_CSRK. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
/* Notify userspace of updated LE connection parameters for an identity
 * address via MGMT_EV_NEW_CONN_PARAM. Non-identity (resolvable/
 * non-resolvable random) addresses are skipped since the parameters
 * could not be re-associated with the device later.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Event fields are little-endian on the wire. */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
6370 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6371 u8 data_len)
6373 eir[eir_len++] = sizeof(type) + data_len;
6374 eir[eir_len++] = type;
6375 memcpy(&eir[eir_len], data, data_len);
6376 eir_len += data_len;
6378 return eir_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with any EIR data (remote name, class
 * of device, or raw LE advertising data) appended after the fixed event
 * header.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* 512 bytes comfortably holds the fixed header plus name
	 * (name_len is a u8) and the 5-byte CoD field.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * and hand its socket (with an extra reference) back to the caller via
 * @data so the disconnect event can be attributed to that socket.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);	/* caller drops this reference after sending the event */

	mgmt_pending_remove(cmd);
}
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command, emitting the Device Unpaired event before completing it.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
6441 bool mgmt_powering_down(struct hci_dev *hdev)
6443 struct pending_cmd *cmd;
6444 struct mgmt_mode *cp;
6446 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6447 if (!cmd)
6448 return false;
6450 cp = cmd->param;
6451 if (!cp->val)
6452 return true;
6454 return false;
/* Handle a completed disconnection: fast-track a pending power-off when
 * this was the last connection, then emit MGMT_EV_DEVICE_DISCONNECTED
 * and resolve any pending DISCONNECT/UNPAIR_DEVICE commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to userspace. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back the initiating socket (held) in sk. */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Skip the initiator's socket so it only gets the command reply. */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
/* Handle a failed HCI Disconnect: resolve pending UNPAIR_DEVICE
 * commands and, if the failure matches the pending DISCONNECT command's
 * target address, complete that command with the mapped status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targets this exact address. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Handle a failed connection attempt: fast-track a pending power-off
 * when this was the last tracked connection, then emit
 * MGMT_EV_CONNECT_FAILED with the mapped status.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
/* Forward a PIN code request to userspace via MGMT_EV_PIN_CODE_REQUEST.
 * @secure: set when a 16-digit (secure) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;	/* PIN pairing is BR/EDR only */
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
/* Complete a pending PIN_CODE_REPLY command with the mapped HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Complete a pending PIN_CODE_NEG_REPLY command with the mapped status. */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
/* Ask userspace to confirm a pairing numeric value via
 * MGMT_EV_USER_CONFIRM_REQUEST. @confirm_hint tells userspace whether a
 * simple yes/no confirmation suffices instead of comparing @value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);	/* wire format is little-endian */

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Ask userspace to enter a passkey via MGMT_EV_USER_PASSKEY_REQUEST. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
/* Shared helper completing a pending user-pairing response command
 * (@opcode selects which) with the mapped HCI status.
 * Returns 0 on success or -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
/* Complete a pending USER_CONFIRM_REPLY command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
/* Complete a pending USER_CONFIRM_NEG_REPLY command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
/* Complete a pending USER_PASSKEY_REPLY command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
/* Complete a pending USER_PASSKEY_NEG_REPLY command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
/* Show the local passkey to userspace via MGMT_EV_PASSKEY_NOTIFY so it
 * can be displayed for the remote side to enter. @entered counts the
 * digits the remote device has keyed in so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
/* Report an authentication failure via MGMT_EV_AUTH_FAILED and, when a
 * pairing command is pending for this connection, complete it with the
 * same status (its socket is skipped for the event so it only sees the
 * command reply).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd)
		pairing_complete(cmd, status);
}
/* HCI Write Auth Enable completed: sync HCI_LINK_SECURITY with the
 * controller state, answer pending SET_LINK_SECURITY commands and emit
 * New Settings if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* On failure just fail all pending commands. */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the dev flag and
	 * record whether this was an actual transition.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
/* Queue an HCI Write EIR command with all-zero data, clearing both the
 * cached copy in hdev->eir and the controller's extended inquiry
 * response. No-op if the controller lacks EIR support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
/* HCI Write SSP Mode completed: reconcile the HCI_SSP_ENABLED (and
 * dependent HCI_HS_ENABLED) flags with the result, answer pending
 * SET_SSP commands, and refresh or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically-set flag
		 * (HS depends on SSP, so it goes too) and notify.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; 'changed' must reflect
		 * either flag transitioning.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
/* HCI Secure Connections enable completed: reconcile HCI_SC_ENABLED and
 * HCI_SC_ONLY with the result, answer pending SET_SECURE_CONN commands
 * and emit New Settings on a real transition.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically-set flags. */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
/* mgmt_pending_foreach() callback: capture (and hold) the first pending
 * command's socket in the cmd_lookup so the caller can skip it when
 * broadcasting the resulting event.
 */
static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);	/* caller drops this reference */
	}
}
/* Class of Device update completed: find the socket of whichever
 * command triggered it (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) and, on
 * success, broadcast the new class to everyone else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
/* Local name change completed: cache the name when the change did not
 * originate from a mgmt command, and emit MGMT_EV_LOCAL_NAME_CHANGED
 * (skipping the originating socket, if any).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name changed outside of mgmt (e.g. during init) - keep
		 * the cached copy in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
/* Read Local OOB Data completed: answer the pending command with either
 * the extended (P-192 + P-256) reply when Secure Connections data is
 * available, or the legacy P-192-only reply.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			/* Extended reply with both P-192 and P-256 data. */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply carrying only the P-192 values. */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6919 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
6921 int i;
6923 for (i = 0; i < uuid_count; i++) {
6924 if (!memcmp(uuid, uuids[i], 16))
6925 return true;
6928 return false;
/* Walk the EIR/advertising data in @eir and return true as soon as any
 * contained 16-, 32- or 128-bit service UUID matches an entry in
 * @uuids. Shorter UUIDs are expanded into 128-bit form using the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length octet: type + payload */
		u8 uuid[16];
		int i;

		/* A zero length octet terminates the data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past
		 * the end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs: little-endian on the wire, mapped
			 * into bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs: mapped into bytes 12-15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length octet + contents). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
6986 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6987 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6988 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6990 char buf[512];
6991 struct mgmt_ev_device_found *ev = (void *) buf;
6992 size_t ev_size;
6993 bool match;
6995 /* Don't send events for a non-kernel initiated discovery. With
6996 * LE one exception is if we have pend_le_reports > 0 in which
6997 * case we're doing passive scanning and want these events.
6999 if (!hci_discovery_active(hdev)) {
7000 if (link_type == ACL_LINK)
7001 return;
7002 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7003 return;
7006 /* When using service discovery with a RSSI threshold, then check
7007 * if such a RSSI threshold is specified. If a RSSI threshold has
7008 * been specified, then all results with a RSSI smaller than the
7009 * RSSI threshold will be dropped.
7011 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7012 * the results are also dropped.
7014 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7015 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
7016 return;
7018 /* Make sure that the buffer is big enough. The 5 extra bytes
7019 * are for the potential CoD field.
7021 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7022 return;
7024 memset(buf, 0, sizeof(buf));
7026 /* In case of device discovery with BR/EDR devices (pre 1.2), the
7027 * RSSI value was reported as 0 when not available. This behavior
7028 * is kept when using device discovery. This is required for full
7029 * backwards compatibility with the API.
7031 * However when using service discovery, the value 127 will be
7032 * returned when the RSSI is not available.
7034 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
7035 rssi = 0;
7037 bacpy(&ev->addr.bdaddr, bdaddr);
7038 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7039 ev->rssi = rssi;
7040 ev->flags = cpu_to_le32(flags);
7042 if (eir_len > 0) {
7043 /* When using service discovery and a list of UUID is
7044 * provided, results with no matching UUID should be
7045 * dropped. In case there is a match the result is
7046 * kept and checking possible scan response data
7047 * will be skipped.
7049 if (hdev->discovery.uuid_count > 0) {
7050 match = eir_has_uuids(eir, eir_len,
7051 hdev->discovery.uuid_count,
7052 hdev->discovery.uuids);
7053 if (!match)
7054 return;
7057 /* Copy EIR or advertising data into event */
7058 memcpy(ev->eir, eir, eir_len);
7059 } else {
7060 /* When using service discovery and a list of UUID is
7061 * provided, results with empty EIR or advertising data
7062 * should be dropped since they do not match any UUID.
7064 if (hdev->discovery.uuid_count > 0)
7065 return;
7068 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
7069 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7070 dev_class, 3);
7072 if (scan_rsp_len > 0) {
7073 /* When using service discovery and a list of UUID is
7074 * provided, results with no matching UUID should be
7075 * dropped if there is no previous match from the
7076 * advertising data.
7078 if (hdev->discovery.uuid_count > 0) {
7079 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
7080 hdev->discovery.uuid_count,
7081 hdev->discovery.uuids))
7082 return;
7085 /* Append scan response data to event */
7086 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7087 } else {
7088 /* When using service discovery and a list of UUID is
7089 * provided, results with empty scan response and no
7090 * previous matched advertising data should be dropped.
7092 if (hdev->discovery.uuid_count > 0 && !match)
7093 return;
7096 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7097 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7099 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a Device Found event whose EIR data
 * contains only an EIR_NAME_COMPLETE field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* +2 covers the EIR field's length and type octets. */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
/* Emit MGMT_EV_DISCOVERING announcing the start/stop of discovery,
 * carrying the active discovery type.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
/* Request-completion callback for re-enabling advertising: only logs
 * the status, no recovery action is taken.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
/* Re-enable LE advertising (e.g. after it was implicitly stopped) when
 * the HCI_ADVERTISING setting is still active.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}