ALSA: hda - Fix pop noises on reboot for Dell XPS 13 9333
[linux/fpc-iii.git] / net / bluetooth / mgmt.c
blobaf8e0a6243b7520617156f79d7d430ce12ef4be9
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "smp.h"
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 6
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_READ_INFO,
43 MGMT_OP_SET_POWERED,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_PAIRABLE,
48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS,
51 MGMT_OP_SET_LE,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_ADD_UUID,
55 MGMT_OP_REMOVE_UUID,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_DISCONNECT,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_PAIR_DEVICE,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_CONFIRM_NAME,
76 MGMT_OP_BLOCK_DEVICE,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_BREDR,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
85 MGMT_OP_SET_PRIVACY,
86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO,
90 static const u16 mgmt_events[] = {
91 MGMT_EV_CONTROLLER_ERROR,
92 MGMT_EV_INDEX_ADDED,
93 MGMT_EV_INDEX_REMOVED,
94 MGMT_EV_NEW_SETTINGS,
95 MGMT_EV_CLASS_OF_DEV_CHANGED,
96 MGMT_EV_LOCAL_NAME_CHANGED,
97 MGMT_EV_NEW_LINK_KEY,
98 MGMT_EV_NEW_LONG_TERM_KEY,
99 MGMT_EV_DEVICE_CONNECTED,
100 MGMT_EV_DEVICE_DISCONNECTED,
101 MGMT_EV_CONNECT_FAILED,
102 MGMT_EV_PIN_CODE_REQUEST,
103 MGMT_EV_USER_CONFIRM_REQUEST,
104 MGMT_EV_USER_PASSKEY_REQUEST,
105 MGMT_EV_AUTH_FAILED,
106 MGMT_EV_DEVICE_FOUND,
107 MGMT_EV_DISCOVERING,
108 MGMT_EV_DEVICE_BLOCKED,
109 MGMT_EV_DEVICE_UNBLOCKED,
110 MGMT_EV_DEVICE_UNPAIRED,
111 MGMT_EV_PASSKEY_NOTIFY,
112 MGMT_EV_NEW_IRK,
113 MGMT_EV_NEW_CSRK,
116 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
118 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
119 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
121 struct pending_cmd {
122 struct list_head list;
123 u16 opcode;
124 int index;
125 void *param;
126 struct sock *sk;
127 void *user_data;
130 /* HCI to MGMT error code conversion table */
131 static u8 mgmt_status_table[] = {
132 MGMT_STATUS_SUCCESS,
133 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
134 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
135 MGMT_STATUS_FAILED, /* Hardware Failure */
136 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
137 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
138 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
139 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
140 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
141 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
142 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
143 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
144 MGMT_STATUS_BUSY, /* Command Disallowed */
145 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
146 MGMT_STATUS_REJECTED, /* Rejected Security */
147 MGMT_STATUS_REJECTED, /* Rejected Personal */
148 MGMT_STATUS_TIMEOUT, /* Host Timeout */
149 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
150 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
151 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
152 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
153 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
154 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
155 MGMT_STATUS_BUSY, /* Repeated Attempts */
156 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
157 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
158 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
159 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
160 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
161 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
162 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
163 MGMT_STATUS_FAILED, /* Unspecified Error */
164 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
165 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
166 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
167 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
168 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
169 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
170 MGMT_STATUS_FAILED, /* Unit Link Key Used */
171 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
172 MGMT_STATUS_TIMEOUT, /* Instant Passed */
173 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
174 MGMT_STATUS_FAILED, /* Transaction Collision */
175 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
176 MGMT_STATUS_REJECTED, /* QoS Rejected */
177 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
178 MGMT_STATUS_REJECTED, /* Insufficient Security */
179 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
180 MGMT_STATUS_BUSY, /* Role Switch Pending */
181 MGMT_STATUS_FAILED, /* Slot Violation */
182 MGMT_STATUS_FAILED, /* Role Switch Failed */
183 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
184 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
185 MGMT_STATUS_BUSY, /* Host Busy Pairing */
186 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
187 MGMT_STATUS_BUSY, /* Controller Busy */
188 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
189 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
190 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
191 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
192 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
195 static u8 mgmt_status(u8 hci_status)
197 if (hci_status < ARRAY_SIZE(mgmt_status_table))
198 return mgmt_status_table[hci_status];
200 return MGMT_STATUS_FAILED;
203 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
205 struct sk_buff *skb;
206 struct mgmt_hdr *hdr;
207 struct mgmt_ev_cmd_status *ev;
208 int err;
210 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
212 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
213 if (!skb)
214 return -ENOMEM;
216 hdr = (void *) skb_put(skb, sizeof(*hdr));
218 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
219 hdr->index = cpu_to_le16(index);
220 hdr->len = cpu_to_le16(sizeof(*ev));
222 ev = (void *) skb_put(skb, sizeof(*ev));
223 ev->status = status;
224 ev->opcode = cpu_to_le16(cmd);
226 err = sock_queue_rcv_skb(sk, skb);
227 if (err < 0)
228 kfree_skb(skb);
230 return err;
233 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
234 void *rp, size_t rp_len)
236 struct sk_buff *skb;
237 struct mgmt_hdr *hdr;
238 struct mgmt_ev_cmd_complete *ev;
239 int err;
241 BT_DBG("sock %p", sk);
243 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
244 if (!skb)
245 return -ENOMEM;
247 hdr = (void *) skb_put(skb, sizeof(*hdr));
249 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
250 hdr->index = cpu_to_le16(index);
251 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
253 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
254 ev->opcode = cpu_to_le16(cmd);
255 ev->status = status;
257 if (rp)
258 memcpy(ev->data, rp, rp_len);
260 err = sock_queue_rcv_skb(sk, skb);
261 if (err < 0)
262 kfree_skb(skb);
264 return err;
267 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
268 u16 data_len)
270 struct mgmt_rp_read_version rp;
272 BT_DBG("sock %p", sk);
274 rp.version = MGMT_VERSION;
275 rp.revision = cpu_to_le16(MGMT_REVISION);
277 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
278 sizeof(rp));
281 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
282 u16 data_len)
284 struct mgmt_rp_read_commands *rp;
285 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
286 const u16 num_events = ARRAY_SIZE(mgmt_events);
287 __le16 *opcode;
288 size_t rp_size;
289 int i, err;
291 BT_DBG("sock %p", sk);
293 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
295 rp = kmalloc(rp_size, GFP_KERNEL);
296 if (!rp)
297 return -ENOMEM;
299 rp->num_commands = cpu_to_le16(num_commands);
300 rp->num_events = cpu_to_le16(num_events);
302 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
303 put_unaligned_le16(mgmt_commands[i], opcode);
305 for (i = 0; i < num_events; i++, opcode++)
306 put_unaligned_le16(mgmt_events[i], opcode);
308 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
309 rp_size);
310 kfree(rp);
312 return err;
315 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
316 u16 data_len)
318 struct mgmt_rp_read_index_list *rp;
319 struct hci_dev *d;
320 size_t rp_len;
321 u16 count;
322 int err;
324 BT_DBG("sock %p", sk);
326 read_lock(&hci_dev_list_lock);
328 count = 0;
329 list_for_each_entry(d, &hci_dev_list, list) {
330 if (d->dev_type == HCI_BREDR)
331 count++;
334 rp_len = sizeof(*rp) + (2 * count);
335 rp = kmalloc(rp_len, GFP_ATOMIC);
336 if (!rp) {
337 read_unlock(&hci_dev_list_lock);
338 return -ENOMEM;
341 count = 0;
342 list_for_each_entry(d, &hci_dev_list, list) {
343 if (test_bit(HCI_SETUP, &d->dev_flags))
344 continue;
346 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
347 continue;
349 if (d->dev_type == HCI_BREDR) {
350 rp->index[count++] = cpu_to_le16(d->id);
351 BT_DBG("Added hci%u", d->id);
355 rp->num_controllers = cpu_to_le16(count);
356 rp_len = sizeof(*rp) + (2 * count);
358 read_unlock(&hci_dev_list_lock);
360 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
361 rp_len);
363 kfree(rp);
365 return err;
368 static u32 get_supported_settings(struct hci_dev *hdev)
370 u32 settings = 0;
372 settings |= MGMT_SETTING_POWERED;
373 settings |= MGMT_SETTING_PAIRABLE;
374 settings |= MGMT_SETTING_DEBUG_KEYS;
376 if (lmp_bredr_capable(hdev)) {
377 settings |= MGMT_SETTING_CONNECTABLE;
378 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
379 settings |= MGMT_SETTING_FAST_CONNECTABLE;
380 settings |= MGMT_SETTING_DISCOVERABLE;
381 settings |= MGMT_SETTING_BREDR;
382 settings |= MGMT_SETTING_LINK_SECURITY;
384 if (lmp_ssp_capable(hdev)) {
385 settings |= MGMT_SETTING_SSP;
386 settings |= MGMT_SETTING_HS;
389 if (lmp_sc_capable(hdev) ||
390 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
391 settings |= MGMT_SETTING_SECURE_CONN;
394 if (lmp_le_capable(hdev)) {
395 settings |= MGMT_SETTING_LE;
396 settings |= MGMT_SETTING_ADVERTISING;
397 settings |= MGMT_SETTING_PRIVACY;
400 return settings;
403 static u32 get_current_settings(struct hci_dev *hdev)
405 u32 settings = 0;
407 if (hdev_is_powered(hdev))
408 settings |= MGMT_SETTING_POWERED;
410 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_CONNECTABLE;
413 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_FAST_CONNECTABLE;
416 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_DISCOVERABLE;
419 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_PAIRABLE;
422 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_BREDR;
425 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LE;
428 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
429 settings |= MGMT_SETTING_LINK_SECURITY;
431 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SSP;
434 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_HS;
437 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
438 settings |= MGMT_SETTING_ADVERTISING;
440 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SECURE_CONN;
443 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
444 settings |= MGMT_SETTING_DEBUG_KEYS;
446 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
447 settings |= MGMT_SETTING_PRIVACY;
449 return settings;
452 #define PNP_INFO_SVCLASS_ID 0x1200
454 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
456 u8 *ptr = data, *uuids_start = NULL;
457 struct bt_uuid *uuid;
459 if (len < 4)
460 return ptr;
462 list_for_each_entry(uuid, &hdev->uuids, list) {
463 u16 uuid16;
465 if (uuid->size != 16)
466 continue;
468 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
469 if (uuid16 < 0x1100)
470 continue;
472 if (uuid16 == PNP_INFO_SVCLASS_ID)
473 continue;
475 if (!uuids_start) {
476 uuids_start = ptr;
477 uuids_start[0] = 1;
478 uuids_start[1] = EIR_UUID16_ALL;
479 ptr += 2;
482 /* Stop if not enough space to put next UUID */
483 if ((ptr - data) + sizeof(u16) > len) {
484 uuids_start[1] = EIR_UUID16_SOME;
485 break;
488 *ptr++ = (uuid16 & 0x00ff);
489 *ptr++ = (uuid16 & 0xff00) >> 8;
490 uuids_start[0] += sizeof(uuid16);
493 return ptr;
496 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
498 u8 *ptr = data, *uuids_start = NULL;
499 struct bt_uuid *uuid;
501 if (len < 6)
502 return ptr;
504 list_for_each_entry(uuid, &hdev->uuids, list) {
505 if (uuid->size != 32)
506 continue;
508 if (!uuids_start) {
509 uuids_start = ptr;
510 uuids_start[0] = 1;
511 uuids_start[1] = EIR_UUID32_ALL;
512 ptr += 2;
515 /* Stop if not enough space to put next UUID */
516 if ((ptr - data) + sizeof(u32) > len) {
517 uuids_start[1] = EIR_UUID32_SOME;
518 break;
521 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
522 ptr += sizeof(u32);
523 uuids_start[0] += sizeof(u32);
526 return ptr;
529 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
531 u8 *ptr = data, *uuids_start = NULL;
532 struct bt_uuid *uuid;
534 if (len < 18)
535 return ptr;
537 list_for_each_entry(uuid, &hdev->uuids, list) {
538 if (uuid->size != 128)
539 continue;
541 if (!uuids_start) {
542 uuids_start = ptr;
543 uuids_start[0] = 1;
544 uuids_start[1] = EIR_UUID128_ALL;
545 ptr += 2;
548 /* Stop if not enough space to put next UUID */
549 if ((ptr - data) + 16 > len) {
550 uuids_start[1] = EIR_UUID128_SOME;
551 break;
554 memcpy(ptr, uuid->uuid, 16);
555 ptr += 16;
556 uuids_start[0] += 16;
559 return ptr;
562 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
564 struct pending_cmd *cmd;
566 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
567 if (cmd->opcode == opcode)
568 return cmd;
571 return NULL;
574 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
576 u8 ad_len = 0;
577 size_t name_len;
579 name_len = strlen(hdev->dev_name);
580 if (name_len > 0) {
581 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
583 if (name_len > max_len) {
584 name_len = max_len;
585 ptr[1] = EIR_NAME_SHORT;
586 } else
587 ptr[1] = EIR_NAME_COMPLETE;
589 ptr[0] = name_len + 1;
591 memcpy(ptr + 2, hdev->dev_name, name_len);
593 ad_len += (name_len + 2);
594 ptr += (name_len + 2);
597 return ad_len;
600 static void update_scan_rsp_data(struct hci_request *req)
602 struct hci_dev *hdev = req->hdev;
603 struct hci_cp_le_set_scan_rsp_data cp;
604 u8 len;
606 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
607 return;
609 memset(&cp, 0, sizeof(cp));
611 len = create_scan_rsp_data(hdev, cp.data);
613 if (hdev->scan_rsp_data_len == len &&
614 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
615 return;
617 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
618 hdev->scan_rsp_data_len = len;
620 cp.length = len;
622 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
625 static u8 get_adv_discov_flags(struct hci_dev *hdev)
627 struct pending_cmd *cmd;
629 /* If there's a pending mgmt command the flags will not yet have
630 * their final values, so check for this first.
632 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
633 if (cmd) {
634 struct mgmt_mode *cp = cmd->param;
635 if (cp->val == 0x01)
636 return LE_AD_GENERAL;
637 else if (cp->val == 0x02)
638 return LE_AD_LIMITED;
639 } else {
640 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
641 return LE_AD_LIMITED;
642 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
643 return LE_AD_GENERAL;
646 return 0;
649 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
651 u8 ad_len = 0, flags = 0;
653 flags |= get_adv_discov_flags(hdev);
655 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
656 flags |= LE_AD_NO_BREDR;
658 if (flags) {
659 BT_DBG("adv flags 0x%02x", flags);
661 ptr[0] = 2;
662 ptr[1] = EIR_FLAGS;
663 ptr[2] = flags;
665 ad_len += 3;
666 ptr += 3;
669 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
670 ptr[0] = 2;
671 ptr[1] = EIR_TX_POWER;
672 ptr[2] = (u8) hdev->adv_tx_power;
674 ad_len += 3;
675 ptr += 3;
678 return ad_len;
681 static void update_adv_data(struct hci_request *req)
683 struct hci_dev *hdev = req->hdev;
684 struct hci_cp_le_set_adv_data cp;
685 u8 len;
687 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
688 return;
690 memset(&cp, 0, sizeof(cp));
692 len = create_adv_data(hdev, cp.data);
694 if (hdev->adv_data_len == len &&
695 memcmp(cp.data, hdev->adv_data, len) == 0)
696 return;
698 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
699 hdev->adv_data_len = len;
701 cp.length = len;
703 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
706 static void create_eir(struct hci_dev *hdev, u8 *data)
708 u8 *ptr = data;
709 size_t name_len;
711 name_len = strlen(hdev->dev_name);
713 if (name_len > 0) {
714 /* EIR Data type */
715 if (name_len > 48) {
716 name_len = 48;
717 ptr[1] = EIR_NAME_SHORT;
718 } else
719 ptr[1] = EIR_NAME_COMPLETE;
721 /* EIR Data length */
722 ptr[0] = name_len + 1;
724 memcpy(ptr + 2, hdev->dev_name, name_len);
726 ptr += (name_len + 2);
729 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
730 ptr[0] = 2;
731 ptr[1] = EIR_TX_POWER;
732 ptr[2] = (u8) hdev->inq_tx_power;
734 ptr += 3;
737 if (hdev->devid_source > 0) {
738 ptr[0] = 9;
739 ptr[1] = EIR_DEVICE_ID;
741 put_unaligned_le16(hdev->devid_source, ptr + 2);
742 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
743 put_unaligned_le16(hdev->devid_product, ptr + 6);
744 put_unaligned_le16(hdev->devid_version, ptr + 8);
746 ptr += 10;
749 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
750 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
751 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
754 static void update_eir(struct hci_request *req)
756 struct hci_dev *hdev = req->hdev;
757 struct hci_cp_write_eir cp;
759 if (!hdev_is_powered(hdev))
760 return;
762 if (!lmp_ext_inq_capable(hdev))
763 return;
765 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
766 return;
768 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
769 return;
771 memset(&cp, 0, sizeof(cp));
773 create_eir(hdev, cp.data);
775 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
776 return;
778 memcpy(hdev->eir, cp.data, sizeof(cp.data));
780 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
783 static u8 get_service_classes(struct hci_dev *hdev)
785 struct bt_uuid *uuid;
786 u8 val = 0;
788 list_for_each_entry(uuid, &hdev->uuids, list)
789 val |= uuid->svc_hint;
791 return val;
794 static void update_class(struct hci_request *req)
796 struct hci_dev *hdev = req->hdev;
797 u8 cod[3];
799 BT_DBG("%s", hdev->name);
801 if (!hdev_is_powered(hdev))
802 return;
804 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
805 return;
807 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
808 return;
810 cod[0] = hdev->minor_class;
811 cod[1] = hdev->major_class;
812 cod[2] = get_service_classes(hdev);
814 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
815 cod[1] |= 0x20;
817 if (memcmp(cod, hdev->dev_class, 3) == 0)
818 return;
820 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
823 static bool get_connectable(struct hci_dev *hdev)
825 struct pending_cmd *cmd;
827 /* If there's a pending mgmt command the flag will not yet have
828 * it's final value, so check for this first.
830 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
831 if (cmd) {
832 struct mgmt_mode *cp = cmd->param;
833 return cp->val;
836 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
839 static void enable_advertising(struct hci_request *req)
841 struct hci_dev *hdev = req->hdev;
842 struct hci_cp_le_set_adv_param cp;
843 u8 own_addr_type, enable = 0x01;
844 bool connectable;
846 /* Clear the HCI_ADVERTISING bit temporarily so that the
847 * hci_update_random_address knows that it's safe to go ahead
848 * and write a new random address. The flag will be set back on
849 * as soon as the SET_ADV_ENABLE HCI command completes.
851 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
853 connectable = get_connectable(hdev);
855 /* Set require_privacy to true only when non-connectable
856 * advertising is used. In that case it is fine to use a
857 * non-resolvable private address.
859 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
860 return;
862 memset(&cp, 0, sizeof(cp));
863 cp.min_interval = cpu_to_le16(0x0800);
864 cp.max_interval = cpu_to_le16(0x0800);
865 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
866 cp.own_address_type = own_addr_type;
867 cp.channel_map = hdev->le_adv_channel_map;
869 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
871 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
874 static void disable_advertising(struct hci_request *req)
876 u8 enable = 0x00;
878 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
881 static void service_cache_off(struct work_struct *work)
883 struct hci_dev *hdev = container_of(work, struct hci_dev,
884 service_cache.work);
885 struct hci_request req;
887 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
888 return;
890 hci_req_init(&req, hdev);
892 hci_dev_lock(hdev);
894 update_eir(&req);
895 update_class(&req);
897 hci_dev_unlock(hdev);
899 hci_req_run(&req, NULL);
902 static void rpa_expired(struct work_struct *work)
904 struct hci_dev *hdev = container_of(work, struct hci_dev,
905 rpa_expired.work);
906 struct hci_request req;
908 BT_DBG("");
910 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
912 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
913 hci_conn_num(hdev, LE_LINK) > 0)
914 return;
916 /* The generation of a new RPA and programming it into the
917 * controller happens in the enable_advertising() function.
920 hci_req_init(&req, hdev);
922 disable_advertising(&req);
923 enable_advertising(&req);
925 hci_req_run(&req, NULL);
928 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
930 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
931 return;
933 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
934 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
936 /* Non-mgmt controlled devices get this bit set
937 * implicitly so that pairing works for them, however
938 * for mgmt we require user-space to explicitly enable
939 * it
941 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
944 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
945 void *data, u16 data_len)
947 struct mgmt_rp_read_info rp;
949 BT_DBG("sock %p %s", sk, hdev->name);
951 hci_dev_lock(hdev);
953 memset(&rp, 0, sizeof(rp));
955 bacpy(&rp.bdaddr, &hdev->bdaddr);
957 rp.version = hdev->hci_ver;
958 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
960 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
961 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
963 memcpy(rp.dev_class, hdev->dev_class, 3);
965 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
966 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
968 hci_dev_unlock(hdev);
970 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
971 sizeof(rp));
974 static void mgmt_pending_free(struct pending_cmd *cmd)
976 sock_put(cmd->sk);
977 kfree(cmd->param);
978 kfree(cmd);
981 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
982 struct hci_dev *hdev, void *data,
983 u16 len)
985 struct pending_cmd *cmd;
987 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
988 if (!cmd)
989 return NULL;
991 cmd->opcode = opcode;
992 cmd->index = hdev->id;
994 cmd->param = kmalloc(len, GFP_KERNEL);
995 if (!cmd->param) {
996 kfree(cmd);
997 return NULL;
1000 if (data)
1001 memcpy(cmd->param, data, len);
1003 cmd->sk = sk;
1004 sock_hold(sk);
1006 list_add(&cmd->list, &hdev->mgmt_pending);
1008 return cmd;
1011 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1012 void (*cb)(struct pending_cmd *cmd,
1013 void *data),
1014 void *data)
1016 struct pending_cmd *cmd, *tmp;
1018 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1019 if (opcode > 0 && cmd->opcode != opcode)
1020 continue;
1022 cb(cmd, data);
1026 static void mgmt_pending_remove(struct pending_cmd *cmd)
1028 list_del(&cmd->list);
1029 mgmt_pending_free(cmd);
1032 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1034 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1036 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1037 sizeof(settings));
1040 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1042 BT_DBG("%s status 0x%02x", hdev->name, status);
1044 if (hci_conn_count(hdev) == 0) {
1045 cancel_delayed_work(&hdev->power_off);
1046 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1050 static void hci_stop_discovery(struct hci_request *req)
1052 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp;
1054 struct inquiry_entry *e;
1056 switch (hdev->discovery.state) {
1057 case DISCOVERY_FINDING:
1058 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1059 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1060 } else {
1061 cancel_delayed_work(&hdev->le_scan_disable);
1062 hci_req_add_le_scan_disable(req);
1065 break;
1067 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1069 NAME_PENDING);
1070 if (!e)
1071 return;
1073 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1075 &cp);
1077 break;
1079 default:
1080 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1082 hci_req_add_le_scan_disable(req);
1083 break;
1087 static int clean_up_hci_state(struct hci_dev *hdev)
1089 struct hci_request req;
1090 struct hci_conn *conn;
1092 hci_req_init(&req, hdev);
1094 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1095 test_bit(HCI_PSCAN, &hdev->flags)) {
1096 u8 scan = 0x00;
1097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1101 disable_advertising(&req);
1103 hci_stop_discovery(&req);
1105 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1106 struct hci_cp_disconnect dc;
1107 struct hci_cp_reject_conn_req rej;
1109 switch (conn->state) {
1110 case BT_CONNECTED:
1111 case BT_CONFIG:
1112 dc.handle = cpu_to_le16(conn->handle);
1113 dc.reason = 0x15; /* Terminated due to Power Off */
1114 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1115 break;
1116 case BT_CONNECT:
1117 if (conn->type == LE_LINK)
1118 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1119 0, NULL);
1120 else if (conn->type == ACL_LINK)
1121 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1122 6, &conn->dst);
1123 break;
1124 case BT_CONNECT2:
1125 bacpy(&rej.bdaddr, &conn->dst);
1126 rej.reason = 0x15; /* Terminated due to Power Off */
1127 if (conn->type == ACL_LINK)
1128 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1129 sizeof(rej), &rej);
1130 else if (conn->type == SCO_LINK)
1131 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1132 sizeof(rej), &rej);
1133 break;
1137 return hci_req_run(&req, clean_up_hci_complete);
1140 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1141 u16 len)
1143 struct mgmt_mode *cp = data;
1144 struct pending_cmd *cmd;
1145 int err;
1147 BT_DBG("request for %s", hdev->name);
1149 if (cp->val != 0x00 && cp->val != 0x01)
1150 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1151 MGMT_STATUS_INVALID_PARAMS);
1153 hci_dev_lock(hdev);
1155 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1156 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1157 MGMT_STATUS_BUSY);
1158 goto failed;
1161 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1162 cancel_delayed_work(&hdev->power_off);
1164 if (cp->val) {
1165 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1166 data, len);
1167 err = mgmt_powered(hdev, 1);
1168 goto failed;
1172 if (!!cp->val == hdev_is_powered(hdev)) {
1173 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1174 goto failed;
1177 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1178 if (!cmd) {
1179 err = -ENOMEM;
1180 goto failed;
1183 if (cp->val) {
1184 queue_work(hdev->req_workqueue, &hdev->power_on);
1185 err = 0;
1186 } else {
1187 /* Disconnect connections, stop scans, etc */
1188 err = clean_up_hci_state(hdev);
1189 if (!err)
1190 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1191 HCI_POWER_OFF_TIMEOUT);
1193 /* ENODATA means there were no HCI commands queued */
1194 if (err == -ENODATA) {
1195 cancel_delayed_work(&hdev->power_off);
1196 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1197 err = 0;
1201 failed:
1202 hci_dev_unlock(hdev);
1203 return err;
1206 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1207 struct sock *skip_sk)
1209 struct sk_buff *skb;
1210 struct mgmt_hdr *hdr;
1212 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1213 if (!skb)
1214 return -ENOMEM;
1216 hdr = (void *) skb_put(skb, sizeof(*hdr));
1217 hdr->opcode = cpu_to_le16(event);
1218 if (hdev)
1219 hdr->index = cpu_to_le16(hdev->id);
1220 else
1221 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1222 hdr->len = cpu_to_le16(data_len);
1224 if (data)
1225 memcpy(skb_put(skb, data_len), data, data_len);
1227 /* Time stamp */
1228 __net_timestamp(skb);
1230 hci_send_to_control(skb, skip_sk);
1231 kfree_skb(skb);
1233 return 0;
1236 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1238 __le32 ev;
1240 ev = cpu_to_le32(get_current_settings(hdev));
1242 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1245 struct cmd_lookup {
1246 struct sock *sk;
1247 struct hci_dev *hdev;
1248 u8 mgmt_status;
/* mgmt_pending_foreach() callback: answer one pending command with a
 * current-settings response, unlink it and free it. The first command's
 * socket is stashed (with an extra reference) in the cmd_lookup so the
 * caller can pass it as skip_sk to new_settings().
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* Already unlinked above, so free directly instead of _remove */
	mgmt_pending_free(cmd);
}
/* mgmt_pending_foreach() callback: fail one pending command with the
 * mgmt status code pointed to by @data and remove it from the list.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1275 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1277 if (!lmp_bredr_capable(hdev))
1278 return MGMT_STATUS_NOT_SUPPORTED;
1279 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1280 return MGMT_STATUS_REJECTED;
1281 else
1282 return MGMT_STATUS_SUCCESS;
1285 static u8 mgmt_le_support(struct hci_dev *hdev)
1287 if (!lmp_le_capable(hdev))
1288 return MGMT_STATUS_NOT_SUPPORTED;
1289 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1290 return MGMT_STATUS_REJECTED;
1291 else
1292 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: report the
 * outcome to the pending mgmt command, update the HCI_DISCOVERABLE
 * flag, arm the discoverable timeout and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited flag set optimistically by
		 * set_discoverable() before the request was run.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the auto-disable timer now that the controller
		 * has actually entered discoverable mode.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle MGMT_OP_SET_DISCOVERABLE. @cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which requires
 * a timeout). Powered-off controllers only get their flag toggled;
 * powered controllers get an HCI request (IAC + scan enable for BR/EDR,
 * advertising data update for LE-only) whose completion handler sends
 * the response.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running controller to ever fire. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes. */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Queue page-scan parameter updates on @req to enable or disable "fast
 * connectable" mode (interlaced scan with a short interval). Commands
 * are only added when a parameter actually differs from the cached
 * controller state, so this is safe to call unconditionally.
 * No-op for non-BR/EDR or pre-1.2 controllers.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Write Page Scan Type/Activity need Bluetooth 1.2 or later */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Only send commands for values that actually change. */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
/* HCI request completion handler for Set Connectable: report the
 * outcome to the pending mgmt command, sync the HCI_CONNECTABLE flag
 * and broadcast new settings when they changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1590 static int set_connectable_update_settings(struct hci_dev *hdev,
1591 struct sock *sk, u8 val)
1593 bool changed = false;
1594 int err;
1596 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1597 changed = true;
1599 if (val) {
1600 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1601 } else {
1602 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1603 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1606 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1607 if (err < 0)
1608 return err;
1610 if (changed)
1611 return new_settings(hdev, sk);
1613 return 0;
/* Handle MGMT_OP_SET_CONNECTABLE (@cp->val 0x00/0x01). Powered-off
 * controllers take the flag-only path; otherwise an HCI request is
 * built (scan enable for BR/EDR, advertising data/state for LE) and
 * the response is sent from set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes. */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable makes a pending
			 * discoverable timeout pointless - cancel it.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Re-program advertising so its flags reflect the new
	 * connectable state (only safe with no LE links up).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: the request was empty, fall back to the
		 * flag-only update path.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1711 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1712 u16 len)
1714 struct mgmt_mode *cp = data;
1715 bool changed;
1716 int err;
1718 BT_DBG("request for %s", hdev->name);
1720 if (cp->val != 0x00 && cp->val != 0x01)
1721 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1722 MGMT_STATUS_INVALID_PARAMS);
1724 hci_dev_lock(hdev);
1726 if (cp->val)
1727 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1728 else
1729 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1731 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1732 if (err < 0)
1733 goto unlock;
1735 if (changed)
1736 err = new_settings(hdev, sk);
1738 unlock:
1739 hci_dev_unlock(hdev);
1740 return err;
/* Handle MGMT_OP_SET_LINK_SECURITY (@cp->val 0x00/0x01). BR/EDR only.
 * When powered off only the flag is toggled; when powered, the value is
 * written with HCI Write Auth Enable and the response is deferred to
 * the command-complete path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state - no HCI
	 * command needed, answer right away.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_SSP (Secure Simple Pairing, @cp->val 0x00/0x01).
 * Requires BR/EDR and SSP-capable hardware. Disabling SSP also clears
 * High Speed (HS depends on SSP). Powered-off controllers get a
 * flag-only update; powered ones send HCI Write SSP Mode.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* Dropping SSP also drops HS; report "changed"
			 * if either flag actually flipped.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - answer without HCI traffic. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_SET_HS (High Speed, @cp->val 0x00/0x01). Requires
 * BR/EDR, SSP-capable hardware and SSP enabled. Pure flag change -
 * disabling while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled. */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion handler for Set LE: respond to all pending
 * SET_LE commands (failure status fans out to each of them), broadcast
 * new settings, and refresh advertising/scan-response data when LE
 * ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responder's sock. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
/* Handle MGMT_OP_SET_LE (@cp->val 0x00/0x01). LE-only controllers
 * cannot toggle LE. If the controller is off or already in the
 * requested host-LE state only the flags change; otherwise HCI Write
 * LE Host Supported is sent (disabling advertising first when turning
 * LE off) and le_enable_complete() finishes the command.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implicitly disables advertising. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2069 /* This is a helper function to test for pending mgmt commands that can
2070 * cause CoD or EIR HCI commands. We can only allow one such pending
2071 * mgmt command at a time since otherwise we cannot easily track what
2072 * the current values are, will be, and based on that calculate if a new
2073 * HCI command needs to be sent and if yes with what value.
2075 static bool pending_eir_or_class(struct hci_dev *hdev)
2077 struct pending_cmd *cmd;
2079 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2080 switch (cmd->opcode) {
2081 case MGMT_OP_ADD_UUID:
2082 case MGMT_OP_REMOVE_UUID:
2083 case MGMT_OP_SET_DEV_CLASS:
2084 case MGMT_OP_SET_POWERED:
2085 return true;
2089 return false;
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs are those whose last 12
 * bytes match this base.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2097 static u8 get_uuid_size(const u8 *uuid)
2099 u32 val;
2101 if (memcmp(uuid, bluetooth_base_uuid, 12))
2102 return 128;
2104 val = get_unaligned_le32(&uuid[12]);
2105 if (val > 0xffff)
2106 return 32;
2108 return 16;
/* Common completion for the class/EIR-touching commands (@mgmt_op):
 * answer the matching pending command with the current class of device
 * and remove it.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
/* HCI request completion handler for Add UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
/* Handle MGMT_OP_ADD_UUID: append the UUID to hdev->uuids and refresh
 * class of device and EIR. An empty request (-ENODATA, e.g. powered
 * off) is answered immediately; otherwise the response comes from
 * add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed - reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2195 static bool enable_service_cache(struct hci_dev *hdev)
2197 if (!hdev_is_powered(hdev))
2198 return false;
2200 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2201 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2202 CACHE_TIMEOUT);
2203 return true;
2206 return false;
/* HCI request completion handler for Remove UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
/* Handle MGMT_OP_REMOVE_UUID: the all-zero UUID clears the whole list,
 * anything else removes every matching entry (INVALID_PARAMS when none
 * match). Class of device and EIR are then refreshed; an empty request
 * (-ENODATA) is answered immediately, otherwise remove_uuid_complete()
 * responds.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed the class/EIR
		 * update is deferred to the cache timeout.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed - reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* HCI request completion handler for Set Device Class. */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
/* Handle MGMT_OP_SET_DEV_CLASS: validate and store the major/minor
 * class, then push the new class (and EIR, if the service cache was
 * active) to a powered controller. Powered-off controllers and empty
 * requests are answered immediately.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while waiting for the cache worker so
		 * it cannot deadlock against us, then re-acquire it.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed - reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2372 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2373 u16 len)
2375 struct mgmt_cp_load_link_keys *cp = data;
2376 u16 key_count, expected_len;
2377 bool changed;
2378 int i;
2380 BT_DBG("request for %s", hdev->name);
2382 if (!lmp_bredr_capable(hdev))
2383 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2384 MGMT_STATUS_NOT_SUPPORTED);
2386 key_count = __le16_to_cpu(cp->key_count);
2388 expected_len = sizeof(*cp) + key_count *
2389 sizeof(struct mgmt_link_key_info);
2390 if (expected_len != len) {
2391 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2392 expected_len, len);
2393 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2394 MGMT_STATUS_INVALID_PARAMS);
2397 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2398 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2399 MGMT_STATUS_INVALID_PARAMS);
2401 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2402 key_count);
2404 for (i = 0; i < key_count; i++) {
2405 struct mgmt_link_key_info *key = &cp->keys[i];
2407 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2408 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2409 MGMT_STATUS_INVALID_PARAMS);
2412 hci_dev_lock(hdev);
2414 hci_link_keys_clear(hdev);
2416 if (cp->debug_keys)
2417 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2418 else
2419 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2421 if (changed)
2422 new_settings(hdev, NULL);
2424 for (i = 0; i < key_count; i++) {
2425 struct mgmt_link_key_info *key = &cp->keys[i];
2427 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2428 key->type, key->pin_len);
2431 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2433 hci_dev_unlock(hdev);
2435 return 0;
2438 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2439 u8 addr_type, struct sock *skip_sk)
2441 struct mgmt_ev_device_unpaired ev;
2443 bacpy(&ev.addr.bdaddr, bdaddr);
2444 ev.addr.type = addr_type;
2446 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2447 skip_sk);
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the stored keys (link key for
 * BR/EDR; IRK, connection params and LTK for LE) and, when requested
 * and a connection exists, disconnect it - in that case the response
 * is deferred until the disconnect completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* Only the LTK result decides "was this device paired". */
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* Nothing to disconnect: answer now and emit the unpaired event
	 * to all other sockets.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Handle MGMT_OP_DISCONNECT: look up the ACL or LE connection for the
 * given address and issue an HCI Disconnect; the mgmt response is sent
 * when the disconnection completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time. */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2606 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2608 switch (link_type) {
2609 case LE_LINK:
2610 switch (addr_type) {
2611 case ADDR_LE_DEV_PUBLIC:
2612 return BDADDR_LE_PUBLIC;
2614 default:
2615 /* Fallback to LE Random address type */
2616 return BDADDR_LE_RANDOM;
2619 default:
2620 /* Fallback to BR/EDR type */
2621 return BDADDR_BREDR;
2625 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2626 u16 data_len)
2628 struct mgmt_rp_get_connections *rp;
2629 struct hci_conn *c;
2630 size_t rp_len;
2631 int err;
2632 u16 i;
2634 BT_DBG("");
2636 hci_dev_lock(hdev);
2638 if (!hdev_is_powered(hdev)) {
2639 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2640 MGMT_STATUS_NOT_POWERED);
2641 goto unlock;
2644 i = 0;
2645 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2646 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2647 i++;
2650 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2651 rp = kmalloc(rp_len, GFP_KERNEL);
2652 if (!rp) {
2653 err = -ENOMEM;
2654 goto unlock;
2657 i = 0;
2658 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2659 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2660 continue;
2661 bacpy(&rp->addr[i].bdaddr, &c->dst);
2662 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2663 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2664 continue;
2665 i++;
2668 rp->conn_count = cpu_to_le16(i);
2670 /* Recalculate length in case of filtered SCO connections, etc */
2671 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2673 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2674 rp_len);
2676 kfree(rp);
2678 unlock:
2679 hci_dev_unlock(hdev);
2680 return err;
2683 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2684 struct mgmt_cp_pin_code_neg_reply *cp)
2686 struct pending_cmd *cmd;
2687 int err;
2689 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2690 sizeof(*cp));
2691 if (!cmd)
2692 return -ENOMEM;
2694 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2695 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2696 if (err < 0)
2697 mgmt_pending_remove(cmd);
2699 return err;
2702 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2703 u16 len)
2705 struct hci_conn *conn;
2706 struct mgmt_cp_pin_code_reply *cp = data;
2707 struct hci_cp_pin_code_reply reply;
2708 struct pending_cmd *cmd;
2709 int err;
2711 BT_DBG("");
2713 hci_dev_lock(hdev);
2715 if (!hdev_is_powered(hdev)) {
2716 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2717 MGMT_STATUS_NOT_POWERED);
2718 goto failed;
2721 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2722 if (!conn) {
2723 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2724 MGMT_STATUS_NOT_CONNECTED);
2725 goto failed;
2728 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2729 struct mgmt_cp_pin_code_neg_reply ncp;
2731 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2733 BT_ERR("PIN code is not 16 bytes long");
2735 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2736 if (err >= 0)
2737 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2738 MGMT_STATUS_INVALID_PARAMS);
2740 goto failed;
2743 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2744 if (!cmd) {
2745 err = -ENOMEM;
2746 goto failed;
2749 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2750 reply.pin_len = cp->pin_len;
2751 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2753 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2754 if (err < 0)
2755 mgmt_pending_remove(cmd);
2757 failed:
2758 hci_dev_unlock(hdev);
2759 return err;
2762 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2763 u16 len)
2765 struct mgmt_cp_set_io_capability *cp = data;
2767 BT_DBG("");
2769 hci_dev_lock(hdev);
2771 hdev->io_capability = cp->io_capability;
2773 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2774 hdev->io_capability);
2776 hci_dev_unlock(hdev);
2778 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2782 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2784 struct hci_dev *hdev = conn->hdev;
2785 struct pending_cmd *cmd;
2787 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2788 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2789 continue;
2791 if (cmd->user_data != conn)
2792 continue;
2794 return cmd;
2797 return NULL;
2800 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2802 struct mgmt_rp_pair_device rp;
2803 struct hci_conn *conn = cmd->user_data;
2805 bacpy(&rp.addr.bdaddr, &conn->dst);
2806 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2808 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2809 &rp, sizeof(rp));
2811 /* So we don't get further callbacks for this connection */
2812 conn->connect_cfm_cb = NULL;
2813 conn->security_cfm_cb = NULL;
2814 conn->disconn_cfm_cb = NULL;
2816 hci_conn_drop(conn);
2818 mgmt_pending_remove(cmd);
2821 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2823 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2824 struct pending_cmd *cmd;
2826 cmd = find_pairing(conn);
2827 if (cmd)
2828 pairing_complete(cmd, status);
2831 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2833 struct pending_cmd *cmd;
2835 BT_DBG("status %u", status);
2837 cmd = find_pairing(conn);
2838 if (!cmd)
2839 BT_DBG("Unable to find a pending command");
2840 else
2841 pairing_complete(cmd, mgmt_status(status));
2844 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2846 struct pending_cmd *cmd;
2848 BT_DBG("status %u", status);
2850 if (!status)
2851 return;
2853 cmd = find_pairing(conn);
2854 if (!cmd)
2855 BT_DBG("Unable to find a pending command");
2856 else
2857 pairing_complete(cmd, mgmt_status(status));
2860 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2861 u16 len)
2863 struct mgmt_cp_pair_device *cp = data;
2864 struct mgmt_rp_pair_device rp;
2865 struct pending_cmd *cmd;
2866 u8 sec_level, auth_type;
2867 struct hci_conn *conn;
2868 int err;
2870 BT_DBG("");
2872 memset(&rp, 0, sizeof(rp));
2873 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2874 rp.addr.type = cp->addr.type;
2876 if (!bdaddr_type_is_valid(cp->addr.type))
2877 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2878 MGMT_STATUS_INVALID_PARAMS,
2879 &rp, sizeof(rp));
2881 hci_dev_lock(hdev);
2883 if (!hdev_is_powered(hdev)) {
2884 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2885 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2886 goto unlock;
2889 sec_level = BT_SECURITY_MEDIUM;
2890 auth_type = HCI_AT_DEDICATED_BONDING;
2892 if (cp->addr.type == BDADDR_BREDR) {
2893 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2894 auth_type);
2895 } else {
2896 u8 addr_type;
2898 /* Convert from L2CAP channel address type to HCI address type
2900 if (cp->addr.type == BDADDR_LE_PUBLIC)
2901 addr_type = ADDR_LE_DEV_PUBLIC;
2902 else
2903 addr_type = ADDR_LE_DEV_RANDOM;
2905 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2906 sec_level, auth_type);
2909 if (IS_ERR(conn)) {
2910 int status;
2912 if (PTR_ERR(conn) == -EBUSY)
2913 status = MGMT_STATUS_BUSY;
2914 else
2915 status = MGMT_STATUS_CONNECT_FAILED;
2917 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2918 status, &rp,
2919 sizeof(rp));
2920 goto unlock;
2923 if (conn->connect_cfm_cb) {
2924 hci_conn_drop(conn);
2925 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2926 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2927 goto unlock;
2930 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2931 if (!cmd) {
2932 err = -ENOMEM;
2933 hci_conn_drop(conn);
2934 goto unlock;
2937 /* For LE, just connecting isn't a proof that the pairing finished */
2938 if (cp->addr.type == BDADDR_BREDR) {
2939 conn->connect_cfm_cb = pairing_complete_cb;
2940 conn->security_cfm_cb = pairing_complete_cb;
2941 conn->disconn_cfm_cb = pairing_complete_cb;
2942 } else {
2943 conn->connect_cfm_cb = le_pairing_complete_cb;
2944 conn->security_cfm_cb = le_pairing_complete_cb;
2945 conn->disconn_cfm_cb = le_pairing_complete_cb;
2948 conn->io_capability = cp->io_cap;
2949 cmd->user_data = conn;
2951 if (conn->state == BT_CONNECTED &&
2952 hci_conn_security(conn, sec_level, auth_type))
2953 pairing_complete(cmd, 0);
2955 err = 0;
2957 unlock:
2958 hci_dev_unlock(hdev);
2959 return err;
2962 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2963 u16 len)
2965 struct mgmt_addr_info *addr = data;
2966 struct pending_cmd *cmd;
2967 struct hci_conn *conn;
2968 int err;
2970 BT_DBG("");
2972 hci_dev_lock(hdev);
2974 if (!hdev_is_powered(hdev)) {
2975 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2976 MGMT_STATUS_NOT_POWERED);
2977 goto unlock;
2980 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2981 if (!cmd) {
2982 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2983 MGMT_STATUS_INVALID_PARAMS);
2984 goto unlock;
2987 conn = cmd->user_data;
2989 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2990 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2991 MGMT_STATUS_INVALID_PARAMS);
2992 goto unlock;
2995 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2997 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2998 addr, sizeof(*addr));
2999 unlock:
3000 hci_dev_unlock(hdev);
3001 return err;
3004 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3005 struct mgmt_addr_info *addr, u16 mgmt_op,
3006 u16 hci_op, __le32 passkey)
3008 struct pending_cmd *cmd;
3009 struct hci_conn *conn;
3010 int err;
3012 hci_dev_lock(hdev);
3014 if (!hdev_is_powered(hdev)) {
3015 err = cmd_complete(sk, hdev->id, mgmt_op,
3016 MGMT_STATUS_NOT_POWERED, addr,
3017 sizeof(*addr));
3018 goto done;
3021 if (addr->type == BDADDR_BREDR)
3022 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3023 else
3024 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3026 if (!conn) {
3027 err = cmd_complete(sk, hdev->id, mgmt_op,
3028 MGMT_STATUS_NOT_CONNECTED, addr,
3029 sizeof(*addr));
3030 goto done;
3033 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3034 /* Continue with pairing via SMP. The hdev lock must be
3035 * released as SMP may try to recquire it for crypto
3036 * purposes.
3038 hci_dev_unlock(hdev);
3039 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3040 hci_dev_lock(hdev);
3042 if (!err)
3043 err = cmd_complete(sk, hdev->id, mgmt_op,
3044 MGMT_STATUS_SUCCESS, addr,
3045 sizeof(*addr));
3046 else
3047 err = cmd_complete(sk, hdev->id, mgmt_op,
3048 MGMT_STATUS_FAILED, addr,
3049 sizeof(*addr));
3051 goto done;
3054 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3055 if (!cmd) {
3056 err = -ENOMEM;
3057 goto done;
3060 /* Continue with pairing via HCI */
3061 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3062 struct hci_cp_user_passkey_reply cp;
3064 bacpy(&cp.bdaddr, &addr->bdaddr);
3065 cp.passkey = passkey;
3066 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3067 } else
3068 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3069 &addr->bdaddr);
3071 if (err < 0)
3072 mgmt_pending_remove(cmd);
3074 done:
3075 hci_dev_unlock(hdev);
3076 return err;
3079 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3080 void *data, u16 len)
3082 struct mgmt_cp_pin_code_neg_reply *cp = data;
3084 BT_DBG("");
3086 return user_pairing_resp(sk, hdev, &cp->addr,
3087 MGMT_OP_PIN_CODE_NEG_REPLY,
3088 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3091 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3092 u16 len)
3094 struct mgmt_cp_user_confirm_reply *cp = data;
3096 BT_DBG("");
3098 if (len != sizeof(*cp))
3099 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3100 MGMT_STATUS_INVALID_PARAMS);
3102 return user_pairing_resp(sk, hdev, &cp->addr,
3103 MGMT_OP_USER_CONFIRM_REPLY,
3104 HCI_OP_USER_CONFIRM_REPLY, 0);
3107 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3108 void *data, u16 len)
3110 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3112 BT_DBG("");
3114 return user_pairing_resp(sk, hdev, &cp->addr,
3115 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3116 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3119 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3120 u16 len)
3122 struct mgmt_cp_user_passkey_reply *cp = data;
3124 BT_DBG("");
3126 return user_pairing_resp(sk, hdev, &cp->addr,
3127 MGMT_OP_USER_PASSKEY_REPLY,
3128 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3131 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3132 void *data, u16 len)
3134 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3136 BT_DBG("");
3138 return user_pairing_resp(sk, hdev, &cp->addr,
3139 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3140 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3143 static void update_name(struct hci_request *req)
3145 struct hci_dev *hdev = req->hdev;
3146 struct hci_cp_write_local_name cp;
3148 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3150 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3153 static void set_name_complete(struct hci_dev *hdev, u8 status)
3155 struct mgmt_cp_set_local_name *cp;
3156 struct pending_cmd *cmd;
3158 BT_DBG("status 0x%02x", status);
3160 hci_dev_lock(hdev);
3162 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3163 if (!cmd)
3164 goto unlock;
3166 cp = cmd->param;
3168 if (status)
3169 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3170 mgmt_status(status));
3171 else
3172 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3173 cp, sizeof(*cp));
3175 mgmt_pending_remove(cmd);
3177 unlock:
3178 hci_dev_unlock(hdev);
3181 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3182 u16 len)
3184 struct mgmt_cp_set_local_name *cp = data;
3185 struct pending_cmd *cmd;
3186 struct hci_request req;
3187 int err;
3189 BT_DBG("");
3191 hci_dev_lock(hdev);
3193 /* If the old values are the same as the new ones just return a
3194 * direct command complete event.
3196 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3197 !memcmp(hdev->short_name, cp->short_name,
3198 sizeof(hdev->short_name))) {
3199 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3200 data, len);
3201 goto failed;
3204 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3206 if (!hdev_is_powered(hdev)) {
3207 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3209 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3210 data, len);
3211 if (err < 0)
3212 goto failed;
3214 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3215 sk);
3217 goto failed;
3220 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3221 if (!cmd) {
3222 err = -ENOMEM;
3223 goto failed;
3226 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3228 hci_req_init(&req, hdev);
3230 if (lmp_bredr_capable(hdev)) {
3231 update_name(&req);
3232 update_eir(&req);
3235 /* The name is stored in the scan response data and so
3236 * no need to udpate the advertising data here.
3238 if (lmp_le_capable(hdev))
3239 update_scan_rsp_data(&req);
3241 err = hci_req_run(&req, set_name_complete);
3242 if (err < 0)
3243 mgmt_pending_remove(cmd);
3245 failed:
3246 hci_dev_unlock(hdev);
3247 return err;
3250 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3251 void *data, u16 data_len)
3253 struct pending_cmd *cmd;
3254 int err;
3256 BT_DBG("%s", hdev->name);
3258 hci_dev_lock(hdev);
3260 if (!hdev_is_powered(hdev)) {
3261 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3262 MGMT_STATUS_NOT_POWERED);
3263 goto unlock;
3266 if (!lmp_ssp_capable(hdev)) {
3267 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3268 MGMT_STATUS_NOT_SUPPORTED);
3269 goto unlock;
3272 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3273 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3274 MGMT_STATUS_BUSY);
3275 goto unlock;
3278 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3279 if (!cmd) {
3280 err = -ENOMEM;
3281 goto unlock;
3284 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3285 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3286 0, NULL);
3287 else
3288 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3290 if (err < 0)
3291 mgmt_pending_remove(cmd);
3293 unlock:
3294 hci_dev_unlock(hdev);
3295 return err;
3298 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3299 void *data, u16 len)
3301 int err;
3303 BT_DBG("%s ", hdev->name);
3305 hci_dev_lock(hdev);
3307 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3308 struct mgmt_cp_add_remote_oob_data *cp = data;
3309 u8 status;
3311 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3312 cp->hash, cp->randomizer);
3313 if (err < 0)
3314 status = MGMT_STATUS_FAILED;
3315 else
3316 status = MGMT_STATUS_SUCCESS;
3318 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3319 status, &cp->addr, sizeof(cp->addr));
3320 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3321 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3322 u8 status;
3324 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3325 cp->hash192,
3326 cp->randomizer192,
3327 cp->hash256,
3328 cp->randomizer256);
3329 if (err < 0)
3330 status = MGMT_STATUS_FAILED;
3331 else
3332 status = MGMT_STATUS_SUCCESS;
3334 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3335 status, &cp->addr, sizeof(cp->addr));
3336 } else {
3337 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3338 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3339 MGMT_STATUS_INVALID_PARAMS);
3342 hci_dev_unlock(hdev);
3343 return err;
3346 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3347 void *data, u16 len)
3349 struct mgmt_cp_remove_remote_oob_data *cp = data;
3350 u8 status;
3351 int err;
3353 BT_DBG("%s", hdev->name);
3355 hci_dev_lock(hdev);
3357 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3358 if (err < 0)
3359 status = MGMT_STATUS_INVALID_PARAMS;
3360 else
3361 status = MGMT_STATUS_SUCCESS;
3363 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3364 status, &cp->addr, sizeof(cp->addr));
3366 hci_dev_unlock(hdev);
3367 return err;
3370 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3372 struct pending_cmd *cmd;
3373 u8 type;
3374 int err;
3376 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3378 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3379 if (!cmd)
3380 return -ENOENT;
3382 type = hdev->discovery.type;
3384 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3385 &type, sizeof(type));
3386 mgmt_pending_remove(cmd);
3388 return err;
3391 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3393 unsigned long timeout = 0;
3395 BT_DBG("status %d", status);
3397 if (status) {
3398 hci_dev_lock(hdev);
3399 mgmt_start_discovery_failed(hdev, status);
3400 hci_dev_unlock(hdev);
3401 return;
3404 hci_dev_lock(hdev);
3405 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3406 hci_dev_unlock(hdev);
3408 switch (hdev->discovery.type) {
3409 case DISCOV_TYPE_LE:
3410 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3411 break;
3413 case DISCOV_TYPE_INTERLEAVED:
3414 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3415 break;
3417 case DISCOV_TYPE_BREDR:
3418 break;
3420 default:
3421 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3424 if (!timeout)
3425 return;
3427 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3430 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3431 void *data, u16 len)
3433 struct mgmt_cp_start_discovery *cp = data;
3434 struct pending_cmd *cmd;
3435 struct hci_cp_le_set_scan_param param_cp;
3436 struct hci_cp_le_set_scan_enable enable_cp;
3437 struct hci_cp_inquiry inq_cp;
3438 struct hci_request req;
3439 /* General inquiry access code (GIAC) */
3440 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3441 u8 status, own_addr_type;
3442 int err;
3444 BT_DBG("%s", hdev->name);
3446 hci_dev_lock(hdev);
3448 if (!hdev_is_powered(hdev)) {
3449 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3450 MGMT_STATUS_NOT_POWERED);
3451 goto failed;
3454 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3455 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3456 MGMT_STATUS_BUSY);
3457 goto failed;
3460 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3461 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3462 MGMT_STATUS_BUSY);
3463 goto failed;
3466 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3467 if (!cmd) {
3468 err = -ENOMEM;
3469 goto failed;
3472 hdev->discovery.type = cp->type;
3474 hci_req_init(&req, hdev);
3476 switch (hdev->discovery.type) {
3477 case DISCOV_TYPE_BREDR:
3478 status = mgmt_bredr_support(hdev);
3479 if (status) {
3480 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3481 status);
3482 mgmt_pending_remove(cmd);
3483 goto failed;
3486 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3487 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3488 MGMT_STATUS_BUSY);
3489 mgmt_pending_remove(cmd);
3490 goto failed;
3493 hci_inquiry_cache_flush(hdev);
3495 memset(&inq_cp, 0, sizeof(inq_cp));
3496 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3497 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3498 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3499 break;
3501 case DISCOV_TYPE_LE:
3502 case DISCOV_TYPE_INTERLEAVED:
3503 status = mgmt_le_support(hdev);
3504 if (status) {
3505 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3506 status);
3507 mgmt_pending_remove(cmd);
3508 goto failed;
3511 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3512 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3513 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3514 MGMT_STATUS_NOT_SUPPORTED);
3515 mgmt_pending_remove(cmd);
3516 goto failed;
3519 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3520 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3521 MGMT_STATUS_REJECTED);
3522 mgmt_pending_remove(cmd);
3523 goto failed;
3526 /* If controller is scanning, it means the background scanning
3527 * is running. Thus, we should temporarily stop it in order to
3528 * set the discovery scanning parameters.
3530 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3531 hci_req_add_le_scan_disable(&req);
3533 memset(&param_cp, 0, sizeof(param_cp));
3535 /* All active scans will be done with either a resolvable
3536 * private address (when privacy feature has been enabled)
3537 * or unresolvable private address.
3539 err = hci_update_random_address(&req, true, &own_addr_type);
3540 if (err < 0) {
3541 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3542 MGMT_STATUS_FAILED);
3543 mgmt_pending_remove(cmd);
3544 goto failed;
3547 param_cp.type = LE_SCAN_ACTIVE;
3548 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3549 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3550 param_cp.own_address_type = own_addr_type;
3551 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3552 &param_cp);
3554 memset(&enable_cp, 0, sizeof(enable_cp));
3555 enable_cp.enable = LE_SCAN_ENABLE;
3556 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3557 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3558 &enable_cp);
3559 break;
3561 default:
3562 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3563 MGMT_STATUS_INVALID_PARAMS);
3564 mgmt_pending_remove(cmd);
3565 goto failed;
3568 err = hci_req_run(&req, start_discovery_complete);
3569 if (err < 0)
3570 mgmt_pending_remove(cmd);
3571 else
3572 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3574 failed:
3575 hci_dev_unlock(hdev);
3576 return err;
3579 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3581 struct pending_cmd *cmd;
3582 int err;
3584 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3585 if (!cmd)
3586 return -ENOENT;
3588 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3589 &hdev->discovery.type, sizeof(hdev->discovery.type));
3590 mgmt_pending_remove(cmd);
3592 return err;
3595 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3597 BT_DBG("status %d", status);
3599 hci_dev_lock(hdev);
3601 if (status) {
3602 mgmt_stop_discovery_failed(hdev, status);
3603 goto unlock;
3606 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3608 unlock:
3609 hci_dev_unlock(hdev);
3612 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3613 u16 len)
3615 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3616 struct pending_cmd *cmd;
3617 struct hci_request req;
3618 int err;
3620 BT_DBG("%s", hdev->name);
3622 hci_dev_lock(hdev);
3624 if (!hci_discovery_active(hdev)) {
3625 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3626 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3627 sizeof(mgmt_cp->type));
3628 goto unlock;
3631 if (hdev->discovery.type != mgmt_cp->type) {
3632 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3633 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3634 sizeof(mgmt_cp->type));
3635 goto unlock;
3638 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3639 if (!cmd) {
3640 err = -ENOMEM;
3641 goto unlock;
3644 hci_req_init(&req, hdev);
3646 hci_stop_discovery(&req);
3648 err = hci_req_run(&req, stop_discovery_complete);
3649 if (!err) {
3650 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3651 goto unlock;
3654 mgmt_pending_remove(cmd);
3656 /* If no HCI commands were sent we're done */
3657 if (err == -ENODATA) {
3658 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3659 &mgmt_cp->type, sizeof(mgmt_cp->type));
3660 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3663 unlock:
3664 hci_dev_unlock(hdev);
3665 return err;
3668 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3669 u16 len)
3671 struct mgmt_cp_confirm_name *cp = data;
3672 struct inquiry_entry *e;
3673 int err;
3675 BT_DBG("%s", hdev->name);
3677 hci_dev_lock(hdev);
3679 if (!hci_discovery_active(hdev)) {
3680 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3681 MGMT_STATUS_FAILED, &cp->addr,
3682 sizeof(cp->addr));
3683 goto failed;
3686 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3687 if (!e) {
3688 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3689 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3690 sizeof(cp->addr));
3691 goto failed;
3694 if (cp->name_known) {
3695 e->name_state = NAME_KNOWN;
3696 list_del(&e->list);
3697 } else {
3698 e->name_state = NAME_NEEDED;
3699 hci_inquiry_cache_update_resolve(hdev, e);
3702 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3703 sizeof(cp->addr));
3705 failed:
3706 hci_dev_unlock(hdev);
3707 return err;
3710 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3711 u16 len)
3713 struct mgmt_cp_block_device *cp = data;
3714 u8 status;
3715 int err;
3717 BT_DBG("%s", hdev->name);
3719 if (!bdaddr_type_is_valid(cp->addr.type))
3720 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3721 MGMT_STATUS_INVALID_PARAMS,
3722 &cp->addr, sizeof(cp->addr));
3724 hci_dev_lock(hdev);
3726 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3727 if (err < 0)
3728 status = MGMT_STATUS_FAILED;
3729 else
3730 status = MGMT_STATUS_SUCCESS;
3732 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3733 &cp->addr, sizeof(cp->addr));
3735 hci_dev_unlock(hdev);
3737 return err;
3740 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3741 u16 len)
3743 struct mgmt_cp_unblock_device *cp = data;
3744 u8 status;
3745 int err;
3747 BT_DBG("%s", hdev->name);
3749 if (!bdaddr_type_is_valid(cp->addr.type))
3750 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3751 MGMT_STATUS_INVALID_PARAMS,
3752 &cp->addr, sizeof(cp->addr));
3754 hci_dev_lock(hdev);
3756 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3757 if (err < 0)
3758 status = MGMT_STATUS_INVALID_PARAMS;
3759 else
3760 status = MGMT_STATUS_SUCCESS;
3762 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3763 &cp->addr, sizeof(cp->addr));
3765 hci_dev_unlock(hdev);
3767 return err;
3770 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3771 u16 len)
3773 struct mgmt_cp_set_device_id *cp = data;
3774 struct hci_request req;
3775 int err;
3776 __u16 source;
3778 BT_DBG("%s", hdev->name);
3780 source = __le16_to_cpu(cp->source);
3782 if (source > 0x0002)
3783 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3784 MGMT_STATUS_INVALID_PARAMS);
3786 hci_dev_lock(hdev);
3788 hdev->devid_source = source;
3789 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3790 hdev->devid_product = __le16_to_cpu(cp->product);
3791 hdev->devid_version = __le16_to_cpu(cp->version);
3793 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3795 hci_req_init(&req, hdev);
3796 update_eir(&req);
3797 hci_req_run(&req, NULL);
3799 hci_dev_unlock(hdev);
3801 return err;
3804 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3806 struct cmd_lookup match = { NULL, hdev };
3808 if (status) {
3809 u8 mgmt_err = mgmt_status(status);
3811 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3812 cmd_status_rsp, &mgmt_err);
3813 return;
3816 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3817 &match);
3819 new_settings(hdev, match.sk);
3821 if (match.sk)
3822 sock_put(match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising on/off.
 * Requires LE support and a boolean parameter. When the device is not
 * powered, the value is unchanged, or an LE link exists, only the
 * HCI_ADVERTISING flag is toggled and a mgmt response is sent without
 * any HCI traffic; otherwise the change is driven through an HCI
 * request completed by set_advertising_complete().
 */
3825 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3826 u16 len)
3828 struct mgmt_mode *cp = data;
3829 struct pending_cmd *cmd;
3830 struct hci_request req;
3831 u8 val, enabled, status;
3832 int err;
3834 BT_DBG("request for %s", hdev->name);
3836 status = mgmt_le_support(hdev);
3837 if (status)
3838 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3839 status);
3841 if (cp->val != 0x00 && cp->val != 0x01)
3842 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3843 MGMT_STATUS_INVALID_PARAMS);
3845 hci_dev_lock(hdev);
3847 val = !!cp->val;
3848 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3850 /* The following conditions are ones which mean that we should
3851 * not do any HCI communication but directly send a mgmt
3852 * response to user space (after toggling the flag if
3853 * necessary).
3855 if (!hdev_is_powered(hdev) || val == enabled ||
3856 hci_conn_num(hdev, LE_LINK) > 0) {
3857 bool changed = false;
3859 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3860 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3861 changed = true;
3864 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3865 if (err < 0)
3866 goto unlock;
3868 if (changed)
3869 err = new_settings(hdev, sk);
3871 goto unlock;
/* Refuse to race with an in-flight SET_ADVERTISING or SET_LE request */
3874 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3875 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3876 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3877 MGMT_STATUS_BUSY);
3878 goto unlock;
3881 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3882 if (!cmd) {
3883 err = -ENOMEM;
3884 goto unlock;
3887 hci_req_init(&req, hdev);
3889 if (val)
3890 enable_advertising(&req);
3891 else
3892 disable_advertising(&req);
3894 err = hci_req_run(&req, set_advertising_complete);
3895 if (err < 0)
3896 mgmt_pending_remove(cmd);
3898 unlock:
3899 hci_dev_unlock(hdev);
3900 return err;
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed on LE-capable, powered-off controllers. A
 * non-zero address must not be BDADDR_NONE and must have its two most
 * significant bits set, as required for static random addresses.
 */
3903 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3904 void *data, u16 len)
3906 struct mgmt_cp_set_static_address *cp = data;
3907 int err;
3909 BT_DBG("%s", hdev->name);
3911 if (!lmp_le_capable(hdev))
3912 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3913 MGMT_STATUS_NOT_SUPPORTED);
3915 if (hdev_is_powered(hdev))
3916 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3917 MGMT_STATUS_REJECTED);
/* BDADDR_ANY (all zeroes) clears the static address and skips checks */
3919 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3920 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3921 return cmd_status(sk, hdev->id,
3922 MGMT_OP_SET_STATIC_ADDRESS,
3923 MGMT_STATUS_INVALID_PARAMS);
3925 /* Two most significant bits shall be set */
3926 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3927 return cmd_status(sk, hdev->id,
3928 MGMT_OP_SET_STATIC_ADDRESS,
3929 MGMT_STATUS_INVALID_PARAMS);
3932 hci_dev_lock(hdev);
3934 bacpy(&hdev->static_addr, &cp->bdaddr);
3936 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3938 hci_dev_unlock(hdev);
3940 return err;
/* Handle MGMT_OP_SET_SCAN_PARAMS: set LE scan interval and window.
 * Both values must be within 0x0004-0x4000 and window <= interval.
 * If a background (passive) scan is currently running and no discovery
 * is active, the scan is restarted so the new parameters take effect.
 */
3943 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3944 void *data, u16 len)
3946 struct mgmt_cp_set_scan_params *cp = data;
3947 __u16 interval, window;
3948 int err;
3950 BT_DBG("%s", hdev->name);
3952 if (!lmp_le_capable(hdev))
3953 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3954 MGMT_STATUS_NOT_SUPPORTED);
3956 interval = __le16_to_cpu(cp->interval);
3958 if (interval < 0x0004 || interval > 0x4000)
3959 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3960 MGMT_STATUS_INVALID_PARAMS);
3962 window = __le16_to_cpu(cp->window);
3964 if (window < 0x0004 || window > 0x4000)
3965 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3966 MGMT_STATUS_INVALID_PARAMS);
3968 if (window > interval)
3969 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3970 MGMT_STATUS_INVALID_PARAMS);
3972 hci_dev_lock(hdev);
3974 hdev->le_scan_interval = interval;
3975 hdev->le_scan_window = window;
3977 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3979 /* If background scan is running, restart it so new parameters are
3980 * loaded.
3982 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3983 hdev->discovery.state == DISCOVERY_STOPPED) {
3984 struct hci_request req;
3986 hci_req_init(&req, hdev);
3988 hci_req_add_le_scan_disable(&req);
3989 hci_req_add_le_passive_scan(&req);
3991 hci_req_run(&req, NULL);
3994 hci_dev_unlock(hdev);
3996 return err;
/* HCI request completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 * On success, update the HCI_FAST_CONNECTABLE flag to match the
 * requested value and reply with the new settings; on failure, send
 * the mapped mgmt error to the pending command's socket.
 */
3999 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4001 struct pending_cmd *cmd;
4003 BT_DBG("status 0x%02x", status);
4005 hci_dev_lock(hdev);
4007 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4008 if (!cmd)
4009 goto unlock;
4011 if (status) {
4012 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4013 mgmt_status(status));
4014 } else {
4015 struct mgmt_mode *cp = cmd->param;
4017 if (cp->val)
4018 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4019 else
4020 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4022 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4023 new_settings(hdev, cmd->sk);
4026 mgmt_pending_remove(cmd);
4028 unlock:
4029 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page
 * scan parameters. Requires BR/EDR enabled, controller >= BT 1.2,
 * powered on and connectable. The actual parameter change is driven
 * through an HCI request completed by fast_connectable_complete().
 */
4032 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4033 void *data, u16 len)
4035 struct mgmt_mode *cp = data;
4036 struct pending_cmd *cmd;
4037 struct hci_request req;
4038 int err;
4040 BT_DBG("%s", hdev->name);
4042 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4043 hdev->hci_ver < BLUETOOTH_VER_1_2)
4044 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4045 MGMT_STATUS_NOT_SUPPORTED);
4047 if (cp->val != 0x00 && cp->val != 0x01)
4048 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4049 MGMT_STATUS_INVALID_PARAMS);
4051 if (!hdev_is_powered(hdev))
4052 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4053 MGMT_STATUS_NOT_POWERED);
4055 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4056 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4057 MGMT_STATUS_REJECTED);
4059 hci_dev_lock(hdev);
4061 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4062 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4063 MGMT_STATUS_BUSY);
4064 goto unlock;
/* No-op request: flag already matches, just echo current settings */
4067 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4068 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4069 hdev);
4070 goto unlock;
4073 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4074 data, len);
4075 if (!cmd) {
4076 err = -ENOMEM;
4077 goto unlock;
4080 hci_req_init(&req, hdev);
4082 write_fast_connectable(&req, cp->val);
4084 err = hci_req_run(&req, fast_connectable_complete);
4085 if (err < 0) {
4086 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4087 MGMT_STATUS_FAILED);
4088 mgmt_pending_remove(cmd);
4091 unlock:
4092 hci_dev_unlock(hdev);
4094 return err;
/* Queue HCI commands to (re-)enable BR/EDR page/inquiry scan according
 * to the HCI_CONNECTABLE and HCI_DISCOVERABLE flags, after making sure
 * fast connectable page scan parameters are disabled.
 */
4097 static void set_bredr_scan(struct hci_request *req)
4099 struct hci_dev *hdev = req->hdev;
4100 u8 scan = 0;
4102 /* Ensure that fast connectable is disabled. This function will
4103 * not do anything if the page scan parameters are already what
4104 * they should be.
4106 write_fast_connectable(req, false);
4108 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4109 scan |= SCAN_PAGE;
4110 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4111 scan |= SCAN_INQUIRY;
/* Only issue Write Scan Enable when at least one scan mode is wanted */
4113 if (scan)
4114 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for MGMT_OP_SET_BREDR.
 * On HCI failure, roll back the HCI_BREDR_ENABLED flag (it was set
 * optimistically before the request ran) and report the error;
 * on success, reply with the new settings.
 */
4117 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4119 struct pending_cmd *cmd;
4121 BT_DBG("status 0x%02x", status);
4123 hci_dev_lock(hdev);
4125 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4126 if (!cmd)
4127 goto unlock;
4129 if (status) {
4130 u8 mgmt_err = mgmt_status(status);
4132 /* We need to restore the flag if related HCI commands
4133 * failed.
4135 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4137 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4138 } else {
4139 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4140 new_settings(hdev, cmd->sk);
4143 mgmt_pending_remove(cmd);
4145 unlock:
4146 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable (or, while powered off, disable)
 * BR/EDR support on a dual-mode controller. Requires both BR/EDR and
 * LE capability and LE currently enabled. Disabling while powered on
 * is rejected. When powered, the flag is flipped before running the
 * HCI request so update_adv_data() emits the correct AD flags;
 * set_bredr_complete() rolls it back on failure.
 */
4149 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4151 struct mgmt_mode *cp = data;
4152 struct pending_cmd *cmd;
4153 struct hci_request req;
4154 int err;
4156 BT_DBG("request for %s", hdev->name);
4158 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4159 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4160 MGMT_STATUS_NOT_SUPPORTED);
4162 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4163 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4164 MGMT_STATUS_REJECTED);
4166 if (cp->val != 0x00 && cp->val != 0x01)
4167 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4168 MGMT_STATUS_INVALID_PARAMS);
4170 hci_dev_lock(hdev);
4172 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4173 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4174 goto unlock;
/* While powered off only the flags are updated; disabling BR/EDR
 * also clears all BR/EDR-dependent settings.
 */
4177 if (!hdev_is_powered(hdev)) {
4178 if (!cp->val) {
4179 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4180 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4181 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4182 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4183 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4186 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4188 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4189 if (err < 0)
4190 goto unlock;
4192 err = new_settings(hdev, sk);
4193 goto unlock;
4196 /* Reject disabling when powered on */
4197 if (!cp->val) {
4198 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4199 MGMT_STATUS_REJECTED);
4200 goto unlock;
4203 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4204 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4205 MGMT_STATUS_BUSY);
4206 goto unlock;
4209 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4210 if (!cmd) {
4211 err = -ENOMEM;
4212 goto unlock;
4215 /* We need to flip the bit already here so that update_adv_data
4216 * generates the correct flags.
4218 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4220 hci_req_init(&req, hdev);
4222 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4223 set_bredr_scan(&req);
4225 /* Since only the advertising data flags will change, there
4226 * is no need to update the scan response data.
4228 update_adv_data(&req);
4230 err = hci_req_run(&req, set_bredr_complete);
4231 if (err < 0)
4232 mgmt_pending_remove(cmd);
4234 unlock:
4235 hci_dev_unlock(hdev);
4236 return err;
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections.
 * Accepted values: 0x00 off, 0x01 on, 0x02 SC-only mode. Requires
 * BR/EDR support and either controller SC capability or the
 * HCI_FORCE_SC debug override. When powered off only flags change;
 * when powered, Write Secure Connections Host Support is sent and the
 * SC-only flag is updated immediately after a successful send.
 */
4239 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4240 void *data, u16 len)
4242 struct mgmt_mode *cp = data;
4243 struct pending_cmd *cmd;
4244 u8 val, status;
4245 int err;
4247 BT_DBG("request for %s", hdev->name);
4249 status = mgmt_bredr_support(hdev);
4250 if (status)
4251 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4252 status);
4254 if (!lmp_sc_capable(hdev) &&
4255 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4256 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4257 MGMT_STATUS_NOT_SUPPORTED);
4259 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4260 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4261 MGMT_STATUS_INVALID_PARAMS);
4263 hci_dev_lock(hdev);
4265 if (!hdev_is_powered(hdev)) {
4266 bool changed;
4268 if (cp->val) {
4269 changed = !test_and_set_bit(HCI_SC_ENABLED,
4270 &hdev->dev_flags);
4271 if (cp->val == 0x02)
4272 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4273 else
4274 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4275 } else {
4276 changed = test_and_clear_bit(HCI_SC_ENABLED,
4277 &hdev->dev_flags);
4278 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4281 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4282 if (err < 0)
4283 goto failed;
4285 if (changed)
4286 err = new_settings(hdev, sk);
4288 goto failed;
4291 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4292 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4293 MGMT_STATUS_BUSY);
4294 goto failed;
4297 val = !!cp->val;
/* No-op if both the enabled and SC-only states already match */
4299 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4300 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4301 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4302 goto failed;
4305 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4306 if (!cmd) {
4307 err = -ENOMEM;
4308 goto failed;
4311 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4312 if (err < 0) {
4313 mgmt_pending_remove(cmd);
4314 goto failed;
4317 if (cp->val == 0x02)
4318 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4319 else
4320 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4322 failed:
4323 hci_dev_unlock(hdev);
4324 return err;
/* Handle MGMT_OP_SET_DEBUG_KEYS: toggle acceptance of SSP debug keys.
 * Only the HCI_DEBUG_KEYS flag is changed; a New Settings event is
 * emitted when the flag actually flipped.
 */
4327 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4328 void *data, u16 len)
4330 struct mgmt_mode *cp = data;
4331 bool changed;
4332 int err;
4334 BT_DBG("request for %s", hdev->name);
4336 if (cp->val != 0x00 && cp->val != 0x01)
4337 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4338 MGMT_STATUS_INVALID_PARAMS);
4340 hci_dev_lock(hdev);
4342 if (cp->val)
4343 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4344 else
4345 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4347 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4348 if (err < 0)
4349 goto unlock;
4351 if (changed)
4352 err = new_settings(hdev, sk);
4354 unlock:
4355 hci_dev_unlock(hdev);
4356 return err;
/* Handle MGMT_OP_SET_PRIVACY: enable/disable LE privacy and store the
 * local Identity Resolving Key. Requires LE capability and a powered
 * off controller. Enabling stores the IRK and marks the RPA expired so
 * a fresh resolvable private address is generated on power on.
 */
4359 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4360 u16 len)
4362 struct mgmt_cp_set_privacy *cp = cp_data;
4363 bool changed;
4364 int err;
4366 BT_DBG("request for %s", hdev->name);
4368 if (!lmp_le_capable(hdev))
4369 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4370 MGMT_STATUS_NOT_SUPPORTED);
4372 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4373 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4374 MGMT_STATUS_INVALID_PARAMS);
4376 if (hdev_is_powered(hdev))
4377 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4378 MGMT_STATUS_REJECTED);
4380 hci_dev_lock(hdev);
4382 /* If user space supports this command it is also expected to
4383 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4385 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4387 if (cp->privacy) {
4388 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4389 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4390 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4391 } else {
4392 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4393 memset(hdev->irk, 0, sizeof(hdev->irk));
4394 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4397 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4398 if (err < 0)
4399 goto unlock;
4401 if (changed)
4402 err = new_settings(hdev, sk);
4404 unlock:
4405 hci_dev_unlock(hdev);
4406 return err;
/* Validate an IRK entry's address: public LE addresses are always
 * acceptable; random LE addresses must be static (two most significant
 * bits set). Any other address type is rejected.
 */
4409 static bool irk_is_valid(struct mgmt_irk_info *irk)
4411 switch (irk->addr.type) {
4412 case BDADDR_LE_PUBLIC:
4413 return true;
4415 case BDADDR_LE_RANDOM:
4416 /* Two most significant bits shall be set */
4417 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4418 return false;
4419 return true;
4422 return false;
/* Handle MGMT_OP_LOAD_IRKS: replace the stored Identity Resolving Keys
 * with the list supplied by user space. All entries are validated
 * before any existing key is cleared, so an invalid list leaves the
 * current state untouched.
 */
4425 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4426 u16 len)
4428 struct mgmt_cp_load_irks *cp = cp_data;
4429 u16 irk_count, expected_len;
4430 int i, err;
4432 BT_DBG("request for %s", hdev->name);
4434 if (!lmp_le_capable(hdev))
4435 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4436 MGMT_STATUS_NOT_SUPPORTED);
4438 irk_count = __le16_to_cpu(cp->irk_count);
/* NOTE(review): expected_len is u16; a large irk_count can wrap the
 * multiplication and defeat the length check below. Later kernels add
 * an explicit upper bound on irk_count — worth confirming/backporting.
 */
4440 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4441 if (expected_len != len) {
4442 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4443 expected_len, len);
4444 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4445 MGMT_STATUS_INVALID_PARAMS);
4448 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4450 for (i = 0; i < irk_count; i++) {
4451 struct mgmt_irk_info *key = &cp->irks[i];
4453 if (!irk_is_valid(key))
4454 return cmd_status(sk, hdev->id,
4455 MGMT_OP_LOAD_IRKS,
4456 MGMT_STATUS_INVALID_PARAMS);
4459 hci_dev_lock(hdev);
4461 hci_smp_irks_clear(hdev);
4463 for (i = 0; i < irk_count; i++) {
4464 struct mgmt_irk_info *irk = &cp->irks[i];
4465 u8 addr_type;
4467 if (irk->addr.type == BDADDR_LE_PUBLIC)
4468 addr_type = ADDR_LE_DEV_PUBLIC;
4469 else
4470 addr_type = ADDR_LE_DEV_RANDOM;
4472 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4473 BDADDR_ANY);
4476 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4478 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4480 hci_dev_unlock(hdev);
4482 return err;
/* Validate an LTK entry: master must be a boolean, and the address
 * must be LE public, or LE random with the static-address bit pattern
 * (two most significant bits set).
 */
4485 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4487 if (key->master != 0x00 && key->master != 0x01)
4488 return false;
4490 switch (key->addr.type) {
4491 case BDADDR_LE_PUBLIC:
4492 return true;
4494 case BDADDR_LE_RANDOM:
4495 /* Two most significant bits shall be set */
4496 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4497 return false;
4498 return true;
4501 return false;
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored SMP Long Term
 * Keys with the supplied list. The whole list is validated first;
 * entries with an unknown authentication type are silently skipped
 * during the load loop.
 */
4504 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4505 void *cp_data, u16 len)
4507 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4508 u16 key_count, expected_len;
4509 int i, err;
4511 BT_DBG("request for %s", hdev->name);
4513 if (!lmp_le_capable(hdev))
4514 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4515 MGMT_STATUS_NOT_SUPPORTED);
4517 key_count = __le16_to_cpu(cp->key_count);
/* NOTE(review): expected_len is u16; a large key_count can wrap the
 * multiplication and defeat the length check below. Later kernels add
 * an explicit upper bound on key_count — worth confirming/backporting.
 */
4519 expected_len = sizeof(*cp) + key_count *
4520 sizeof(struct mgmt_ltk_info);
4521 if (expected_len != len) {
4522 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4523 expected_len, len);
4524 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4525 MGMT_STATUS_INVALID_PARAMS);
4528 BT_DBG("%s key_count %u", hdev->name, key_count);
4530 for (i = 0; i < key_count; i++) {
4531 struct mgmt_ltk_info *key = &cp->keys[i];
4533 if (!ltk_is_valid(key))
4534 return cmd_status(sk, hdev->id,
4535 MGMT_OP_LOAD_LONG_TERM_KEYS,
4536 MGMT_STATUS_INVALID_PARAMS);
4539 hci_dev_lock(hdev);
4541 hci_smp_ltks_clear(hdev);
4543 for (i = 0; i < key_count; i++) {
4544 struct mgmt_ltk_info *key = &cp->keys[i];
4545 u8 type, addr_type, authenticated;
4547 if (key->addr.type == BDADDR_LE_PUBLIC)
4548 addr_type = ADDR_LE_DEV_PUBLIC;
4549 else
4550 addr_type = ADDR_LE_DEV_RANDOM;
4552 if (key->master)
4553 type = HCI_SMP_LTK;
4554 else
4555 type = HCI_SMP_LTK_SLAVE;
4557 switch (key->type) {
4558 case MGMT_LTK_UNAUTHENTICATED:
4559 authenticated = 0x00;
4560 break;
4561 case MGMT_LTK_AUTHENTICATED:
4562 authenticated = 0x01;
4563 break;
4564 default:
4565 continue;
4568 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4569 authenticated, key->val, key->enc_size, key->ediv,
4570 key->rand);
4573 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4574 NULL, 0);
4576 hci_dev_unlock(hdev);
4578 return err;
/* Context passed to get_conn_info_complete() to match pending
 * GET_CONN_INFO commands against one connection and carry the result
 * status plus whether the TX power values in the hci_conn are valid.
 */
4581 struct cmd_conn_lookup {
4582 struct hci_conn *conn;
4583 bool valid_tx_power;
4584 u8 mgmt_status;
/* mgmt_pending_foreach() callback: reply to one pending GET_CONN_INFO
 * command if it refers to the connection in the lookup context, filling
 * RSSI/TX power from the hci_conn cache (or HCI_TX_POWER_INVALID when
 * the refresh did not yield valid TX power values).
 */
4587 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4589 struct cmd_conn_lookup *match = data;
4590 struct mgmt_cp_get_conn_info *cp;
4591 struct mgmt_rp_get_conn_info rp;
4592 struct hci_conn *conn = cmd->user_data;
4594 if (conn != match->conn)
4595 return;
4597 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4599 memset(&rp, 0, sizeof(rp));
4600 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4601 rp.addr.type = cp->addr.type;
4603 if (!match->mgmt_status) {
4604 rp.rssi = conn->rssi;
4606 if (match->valid_tx_power) {
4607 rp.tx_power = conn->tx_power;
4608 rp.max_tx_power = conn->max_tx_power;
4609 } else {
4610 rp.tx_power = HCI_TX_POWER_INVALID;
4611 rp.max_tx_power = HCI_TX_POWER_INVALID;
4615 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4616 match->mgmt_status, &rp, sizeof(rp));
/* Drop the reference taken by get_conn_info() when the cmd was queued */
4618 hci_conn_drop(conn);
4620 mgmt_pending_remove(cmd);
/* HCI request completion callback for the RSSI/TX-power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command (Read RSSI or Read TX Power share the same leading handle
 * field) and then answers all pending GET_CONN_INFO commands for that
 * connection via get_conn_info_complete().
 */
4623 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4625 struct hci_cp_read_rssi *cp;
4626 struct hci_conn *conn;
4627 struct cmd_conn_lookup match;
4628 u16 handle;
4630 BT_DBG("status 0x%02x", status);
4632 hci_dev_lock(hdev);
4634 /* TX power data is valid in case request completed successfully,
4635 * otherwise we assume it's not valid. At the moment we assume that
4636 * either both or none of current and max values are valid to keep code
4637 * simple.
4639 match.valid_tx_power = !status;
4641 /* Commands sent in request are either Read RSSI or Read Transmit Power
4642 * Level so we check which one was last sent to retrieve connection
4643 * handle. Both commands have handle as first parameter so it's safe to
4644 * cast data on the same command struct.
4646 * First command sent is always Read RSSI and we fail only if it fails.
4647 * In other case we simply override error to indicate success as we
4648 * already remembered if TX power value is actually valid.
4650 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4651 if (!cp) {
4652 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4653 status = 0;
4656 if (!cp) {
4657 BT_ERR("invalid sent_cmd in response");
4658 goto unlock;
4661 handle = __le16_to_cpu(cp->handle);
4662 conn = hci_conn_hash_lookup_handle(hdev, handle);
4663 if (!conn) {
4664 BT_ERR("unknown handle (%d) in response", handle);
4665 goto unlock;
4668 match.conn = conn;
4669 match.mgmt_status = mgmt_status(status);
4671 /* Cache refresh is complete, now reply for mgmt request for given
4672 * connection only.
4674 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4675 get_conn_info_complete, &match);
4677 unlock:
4678 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an active
 * connection. Cached values are returned directly when recent enough;
 * otherwise an HCI request (Read RSSI plus, where needed, Read TX
 * Power) refreshes the cache and the reply is deferred to
 * conn_info_refresh_complete(). The cache validity window is
 * randomized between hdev->conn_info_min_age and conn_info_max_age so
 * clients cannot time their polling to the exact refresh moment.
 */
4681 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4682 u16 len)
4684 struct mgmt_cp_get_conn_info *cp = data;
4685 struct mgmt_rp_get_conn_info rp;
4686 struct hci_conn *conn;
4687 unsigned long conn_info_age;
4688 int err = 0;
4690 BT_DBG("%s", hdev->name);
4692 memset(&rp, 0, sizeof(rp));
4693 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4694 rp.addr.type = cp->addr.type;
4696 if (!bdaddr_type_is_valid(cp->addr.type))
4697 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4698 MGMT_STATUS_INVALID_PARAMS,
4699 &rp, sizeof(rp));
4701 hci_dev_lock(hdev);
4703 if (!hdev_is_powered(hdev)) {
4704 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4705 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4706 goto unlock;
4709 if (cp->addr.type == BDADDR_BREDR)
4710 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4711 &cp->addr.bdaddr);
4712 else
4713 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4715 if (!conn || conn->state != BT_CONNECTED) {
4716 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4717 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4718 goto unlock;
4721 /* To avoid client trying to guess when to poll again for information we
4722 * calculate conn info age as random value between min/max set in hdev.
4724 conn_info_age = hdev->conn_info_min_age +
4725 prandom_u32_max(hdev->conn_info_max_age -
4726 hdev->conn_info_min_age);
4728 /* Query controller to refresh cached values if they are too old or were
4729 * never read.
4731 if (time_after(jiffies, conn->conn_info_timestamp +
4732 msecs_to_jiffies(conn_info_age)) ||
4733 !conn->conn_info_timestamp) {
4734 struct hci_request req;
4735 struct hci_cp_read_tx_power req_txp_cp;
4736 struct hci_cp_read_rssi req_rssi_cp;
4737 struct pending_cmd *cmd;
4739 hci_req_init(&req, hdev);
4740 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4741 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4742 &req_rssi_cp);
4744 /* For LE links TX power does not change thus we don't need to
4745 * query for it once value is known.
4747 if (!bdaddr_type_is_le(cp->addr.type) ||
4748 conn->tx_power == HCI_TX_POWER_INVALID) {
4749 req_txp_cp.handle = cpu_to_le16(conn->handle);
4750 req_txp_cp.type = 0x00;
4751 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4752 sizeof(req_txp_cp), &req_txp_cp);
4755 /* Max TX power needs to be read only once per connection */
4756 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4757 req_txp_cp.handle = cpu_to_le16(conn->handle);
4758 req_txp_cp.type = 0x01;
4759 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4760 sizeof(req_txp_cp), &req_txp_cp);
4763 err = hci_req_run(&req, conn_info_refresh_complete);
4764 if (err < 0)
4765 goto unlock;
4767 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
4768 data, len);
4769 if (!cmd) {
4770 err = -ENOMEM;
4771 goto unlock;
/* Hold the connection until the deferred reply drops it in
 * get_conn_info_complete().
 */
4774 hci_conn_hold(conn);
4775 cmd->user_data = conn;
4777 conn->conn_info_timestamp = jiffies;
4778 } else {
4779 /* Cache is valid, just reply with values cached in hci_conn */
4780 rp.rssi = conn->rssi;
4781 rp.tx_power = conn->tx_power;
4782 rp.max_tx_power = conn->max_tx_power;
4784 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4785 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4788 unlock:
4789 hci_dev_unlock(hdev);
4790 return err;
/* Dispatch table for mgmt commands. The array index equals the mgmt
 * opcode, so entry order must match the opcode numbering in mgmt.h.
 * var_len marks commands whose payload may exceed data_len (the
 * minimum size); fixed-size commands must match data_len exactly.
 */
4793 static const struct mgmt_handler {
4794 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4795 u16 data_len);
4796 bool var_len;
4797 size_t data_len;
4798 } mgmt_handlers[] = {
4799 { NULL }, /* 0x0000 (no command) */
4800 { read_version, false, MGMT_READ_VERSION_SIZE },
4801 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4802 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4803 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4804 { set_powered, false, MGMT_SETTING_SIZE },
4805 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4806 { set_connectable, false, MGMT_SETTING_SIZE },
4807 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4808 { set_pairable, false, MGMT_SETTING_SIZE },
4809 { set_link_security, false, MGMT_SETTING_SIZE },
4810 { set_ssp, false, MGMT_SETTING_SIZE },
4811 { set_hs, false, MGMT_SETTING_SIZE },
4812 { set_le, false, MGMT_SETTING_SIZE },
4813 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4814 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4815 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4816 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4817 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4818 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4819 { disconnect, false, MGMT_DISCONNECT_SIZE },
4820 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4821 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4822 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4823 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4824 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4825 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4826 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4827 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4828 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4829 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4830 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4831 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4832 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4833 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4834 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4835 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4836 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4837 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4838 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4839 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4840 { set_advertising, false, MGMT_SETTING_SIZE },
4841 { set_bredr, false, MGMT_SETTING_SIZE },
4842 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4843 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4844 { set_secure_conn, false, MGMT_SETTING_SIZE },
4845 { set_debug_keys, false, MGMT_SETTING_SIZE },
4846 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4847 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4848 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
/* Entry point for mgmt socket writes: copy the message from user
 * space, parse and validate the header (opcode, controller index,
 * payload length), resolve and sanity-check the target hci_dev, then
 * dispatch to the matching mgmt_handlers[] entry. Returns the consumed
 * message length on success or a negative errno / after having sent a
 * mgmt command-status error to the socket.
 */
4852 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4854 void *buf;
4855 u8 *cp;
4856 struct mgmt_hdr *hdr;
4857 u16 opcode, index, len;
4858 struct hci_dev *hdev = NULL;
4859 const struct mgmt_handler *handler;
4860 int err;
4862 BT_DBG("got %zu bytes", msglen);
4864 if (msglen < sizeof(*hdr))
4865 return -EINVAL;
4867 buf = kmalloc(msglen, GFP_KERNEL);
4868 if (!buf)
4869 return -ENOMEM;
4871 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4872 err = -EFAULT;
4873 goto done;
4876 hdr = buf;
4877 opcode = __le16_to_cpu(hdr->opcode);
4878 index = __le16_to_cpu(hdr->index);
4879 len = __le16_to_cpu(hdr->len);
4881 if (len != msglen - sizeof(*hdr)) {
4882 err = -EINVAL;
4883 goto done;
4886 if (index != MGMT_INDEX_NONE) {
4887 hdev = hci_dev_get(index);
4888 if (!hdev) {
4889 err = cmd_status(sk, index, opcode,
4890 MGMT_STATUS_INVALID_INDEX);
4891 goto done;
/* Controllers still in setup or bound to a user channel are not
 * addressable through the mgmt interface.
 */
4894 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4895 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4896 err = cmd_status(sk, index, opcode,
4897 MGMT_STATUS_INVALID_INDEX);
4898 goto done;
4902 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4903 mgmt_handlers[opcode].func == NULL) {
4904 BT_DBG("Unknown op %u", opcode);
4905 err = cmd_status(sk, index, opcode,
4906 MGMT_STATUS_UNKNOWN_COMMAND);
4907 goto done;
/* Commands below READ_INFO are index-less; everything else needs one */
4910 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4911 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4912 err = cmd_status(sk, index, opcode,
4913 MGMT_STATUS_INVALID_INDEX);
4914 goto done;
4917 handler = &mgmt_handlers[opcode];
4919 if ((handler->var_len && len < handler->data_len) ||
4920 (!handler->var_len && len != handler->data_len)) {
4921 err = cmd_status(sk, index, opcode,
4922 MGMT_STATUS_INVALID_PARAMS);
4923 goto done;
4926 if (hdev)
4927 mgmt_init_hdev(sk, hdev);
4929 cp = buf + sizeof(*hdr);
4931 err = handler->func(sk, hdev, cp, len);
4932 if (err < 0)
4933 goto done;
4935 err = msglen;
4937 done:
4938 if (hdev)
4939 hci_dev_put(hdev);
4941 kfree(buf);
4942 return err;
/* Broadcast MGMT_EV_INDEX_ADDED for a newly registered BR/EDR
 * controller; non-BR/EDR device types are ignored.
 */
4945 void mgmt_index_added(struct hci_dev *hdev)
4947 if (hdev->dev_type != HCI_BREDR)
4948 return;
4950 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Broadcast MGMT_EV_INDEX_REMOVED for an unregistering BR/EDR
 * controller, after failing all still-pending mgmt commands (any
 * opcode) with MGMT_STATUS_INVALID_INDEX.
 */
4953 void mgmt_index_removed(struct hci_dev *hdev)
4955 u8 status = MGMT_STATUS_INVALID_INDEX;
4957 if (hdev->dev_type != HCI_BREDR)
4958 return;
4960 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4962 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4965 /* This function requires the caller holds hdev->lock */
/* Re-queue pending LE connections for every stored connection
 * parameter entry marked HCI_AUTO_CONN_ALWAYS (used after power on).
 */
4966 static void restart_le_auto_conns(struct hci_dev *hdev)
4968 struct hci_conn_params *p;
4970 list_for_each_entry(p, &hdev->le_conn_params, list) {
4971 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4972 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
/* HCI request completion callback for powered_update_hci(): restart LE
 * auto-connections, answer pending SET_POWERED commands with the
 * current settings and broadcast a New Settings event.
 */
4976 static void powered_complete(struct hci_dev *hdev, u8 status)
4978 struct cmd_lookup match = { NULL, hdev };
4980 BT_DBG("status 0x%02x", status);
4982 hci_dev_lock(hdev);
4984 restart_le_auto_conns(hdev);
4986 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4988 new_settings(hdev, match.sk);
4990 hci_dev_unlock(hdev);
4992 if (match.sk)
4993 sock_put(match.sk);
/* Build and run the HCI request that synchronizes the controller with
 * the mgmt dev_flags after power on: SSP mode, LE host support,
 * advertising/scan-response data, link security, BR/EDR scan mode,
 * class, name and EIR. Returns the hci_req_run() result (negative when
 * the request is empty or fails to start).
 */
4996 static int powered_update_hci(struct hci_dev *hdev)
4998 struct hci_request req;
4999 u8 link_sec;
5001 hci_req_init(&req, hdev);
5003 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5004 !lmp_host_ssp_capable(hdev)) {
5005 u8 ssp = 1;
5007 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5010 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5011 lmp_bredr_capable(hdev)) {
5012 struct hci_cp_write_le_host_supported cp;
5014 cp.le = 1;
5015 cp.simul = lmp_le_br_capable(hdev);
5017 /* Check first if we already have the right
5018 * host state (host features set)
5020 if (cp.le != lmp_host_le_capable(hdev) ||
5021 cp.simul != lmp_host_le_br_capable(hdev))
5022 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5023 sizeof(cp), &cp);
5026 if (lmp_le_capable(hdev)) {
5027 /* Make sure the controller has a good default for
5028 * advertising data. This also applies to the case
5029 * where BR/EDR was toggled during the AUTO_OFF phase.
5031 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5032 update_adv_data(&req);
5033 update_scan_rsp_data(&req);
5036 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5037 enable_advertising(&req);
5040 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5041 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5042 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5043 sizeof(link_sec), &link_sec);
5045 if (lmp_bredr_capable(hdev)) {
5046 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5047 set_bredr_scan(&req);
5048 update_class(&req);
5049 update_name(&req);
5050 update_eir(&req);
5053 return hci_req_run(&req, powered_complete);
/* Notify mgmt about a power state change. On power on the HCI sync
 * request is attempted first; pending SET_POWERED commands are only
 * answered directly if that request could not be started (otherwise
 * powered_complete() handles them). On power off, all pending commands
 * are failed with NOT_POWERED and a zero class-of-device event is sent
 * if the class was non-zero.
 */
5056 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5058 struct cmd_lookup match = { NULL, hdev };
5059 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5060 u8 zero_cod[] = { 0, 0, 0 };
5061 int err;
5063 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5064 return 0;
5066 if (powered) {
5067 if (powered_update_hci(hdev) == 0)
5068 return 0;
5070 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5071 &match);
5072 goto new_settings;
5075 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5076 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5078 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5079 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5080 zero_cod, sizeof(zero_cod), NULL);
5082 new_settings:
5083 err = new_settings(hdev, match.sk);
5085 if (match.sk)
5086 sock_put(match.sk);
5088 return err;
5091 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5093 struct pending_cmd *cmd;
5094 u8 status;
5096 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5097 if (!cmd)
5098 return;
5100 if (err == -ERFKILL)
5101 status = MGMT_STATUS_RFKILLED;
5102 else
5103 status = MGMT_STATUS_FAILED;
5105 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5107 mgmt_pending_remove(cmd);
5110 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5112 struct hci_request req;
5114 hci_dev_lock(hdev);
5116 /* When discoverable timeout triggers, then just make sure
5117 * the limited discoverable flag is cleared. Even in the case
5118 * of a timeout triggered from general discoverable, it is
5119 * safe to unconditionally clear the flag.
5121 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5122 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5124 hci_req_init(&req, hdev);
5125 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5126 u8 scan = SCAN_PAGE;
5127 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5128 sizeof(scan), &scan);
5130 update_class(&req);
5131 update_adv_data(&req);
5132 hci_req_run(&req, NULL);
5134 hdev->discov_timeout = 0;
5136 new_settings(hdev, NULL);
5138 hci_dev_unlock(hdev);
5141 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5143 bool changed;
5145 /* Nothing needed here if there's a pending command since that
5146 * commands request completion callback takes care of everything
5147 * necessary.
5149 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5150 return;
5152 /* Powering off may clear the scan mode - don't let that interfere */
5153 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5154 return;
5156 if (discoverable) {
5157 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5158 } else {
5159 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5160 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5163 if (changed) {
5164 struct hci_request req;
5166 /* In case this change in discoverable was triggered by
5167 * a disabling of connectable there could be a need to
5168 * update the advertising flags.
5170 hci_req_init(&req, hdev);
5171 update_adv_data(&req);
5172 hci_req_run(&req, NULL);
5174 new_settings(hdev, NULL);
5178 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5180 bool changed;
5182 /* Nothing needed here if there's a pending command since that
5183 * commands request completion callback takes care of everything
5184 * necessary.
5186 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5187 return;
5189 /* Powering off may clear the scan mode - don't let that interfere */
5190 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5191 return;
5193 if (connectable)
5194 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5195 else
5196 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5198 if (changed)
5199 new_settings(hdev, NULL);
5202 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5204 /* Powering off may stop advertising - don't let that interfere */
5205 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5206 return;
5208 if (advertising)
5209 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5210 else
5211 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5214 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5216 u8 mgmt_err = mgmt_status(status);
5218 if (scan & SCAN_PAGE)
5219 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5220 cmd_status_rsp, &mgmt_err);
5222 if (scan & SCAN_INQUIRY)
5223 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5224 cmd_status_rsp, &mgmt_err);
5227 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5228 bool persistent)
5230 struct mgmt_ev_new_link_key ev;
5232 memset(&ev, 0, sizeof(ev));
5234 ev.store_hint = persistent;
5235 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5236 ev.key.addr.type = BDADDR_BREDR;
5237 ev.key.type = key->type;
5238 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5239 ev.key.pin_len = key->pin_len;
5241 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5244 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5246 if (ltk->authenticated)
5247 return MGMT_LTK_AUTHENTICATED;
5249 return MGMT_LTK_UNAUTHENTICATED;
5252 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5254 struct mgmt_ev_new_long_term_key ev;
5256 memset(&ev, 0, sizeof(ev));
5258 /* Devices using resolvable or non-resolvable random addresses
5259 * without providing an indentity resolving key don't require
5260 * to store long term keys. Their addresses will change the
5261 * next time around.
5263 * Only when a remote device provides an identity address
5264 * make sure the long term key is stored. If the remote
5265 * identity is known, the long term keys are internally
5266 * mapped to the identity address. So allow static random
5267 * and public addresses here.
5269 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5270 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5271 ev.store_hint = 0x00;
5272 else
5273 ev.store_hint = persistent;
5275 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5276 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5277 ev.key.type = mgmt_ltk_type(key);
5278 ev.key.enc_size = key->enc_size;
5279 ev.key.ediv = key->ediv;
5280 ev.key.rand = key->rand;
5282 if (key->type == HCI_SMP_LTK)
5283 ev.key.master = 1;
5285 memcpy(ev.key.val, key->val, sizeof(key->val));
5287 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
5290 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5292 struct mgmt_ev_new_irk ev;
5294 memset(&ev, 0, sizeof(ev));
5296 /* For identity resolving keys from devices that are already
5297 * using a public address or static random address, do not
5298 * ask for storing this key. The identity resolving key really
5299 * is only mandatory for devices using resovlable random
5300 * addresses.
5302 * Storing all identity resolving keys has the downside that
5303 * they will be also loaded on next boot of they system. More
5304 * identity resolving keys, means more time during scanning is
5305 * needed to actually resolve these addresses.
5307 if (bacmp(&irk->rpa, BDADDR_ANY))
5308 ev.store_hint = 0x01;
5309 else
5310 ev.store_hint = 0x00;
5312 bacpy(&ev.rpa, &irk->rpa);
5313 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5314 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5315 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5317 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5320 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5321 bool persistent)
5323 struct mgmt_ev_new_csrk ev;
5325 memset(&ev, 0, sizeof(ev));
5327 /* Devices using resolvable or non-resolvable random addresses
5328 * without providing an indentity resolving key don't require
5329 * to store signature resolving keys. Their addresses will change
5330 * the next time around.
5332 * Only when a remote device provides an identity address
5333 * make sure the signature resolving key is stored. So allow
5334 * static random and public addresses here.
5336 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5337 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5338 ev.store_hint = 0x00;
5339 else
5340 ev.store_hint = persistent;
5342 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5343 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5344 ev.key.master = csrk->master;
5345 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5347 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5350 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5351 u8 data_len)
5353 eir[eir_len++] = sizeof(type) + data_len;
5354 eir[eir_len++] = type;
5355 memcpy(&eir[eir_len], data, data_len);
5356 eir_len += data_len;
5358 return eir_len;
5361 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5362 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5363 u8 *dev_class)
5365 char buf[512];
5366 struct mgmt_ev_device_connected *ev = (void *) buf;
5367 u16 eir_len = 0;
5369 bacpy(&ev->addr.bdaddr, bdaddr);
5370 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5372 ev->flags = __cpu_to_le32(flags);
5374 if (name_len > 0)
5375 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
5376 name, name_len);
5378 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5379 eir_len = eir_append_data(ev->eir, eir_len,
5380 EIR_CLASS_OF_DEV, dev_class, 3);
5382 ev->eir_len = cpu_to_le16(eir_len);
5384 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5385 sizeof(*ev) + eir_len, NULL);
5388 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5390 struct mgmt_cp_disconnect *cp = cmd->param;
5391 struct sock **sk = data;
5392 struct mgmt_rp_disconnect rp;
5394 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5395 rp.addr.type = cp->addr.type;
5397 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5398 sizeof(rp));
5400 *sk = cmd->sk;
5401 sock_hold(*sk);
5403 mgmt_pending_remove(cmd);
5406 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5408 struct hci_dev *hdev = data;
5409 struct mgmt_cp_unpair_device *cp = cmd->param;
5410 struct mgmt_rp_unpair_device rp;
5412 memset(&rp, 0, sizeof(rp));
5413 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5414 rp.addr.type = cp->addr.type;
5416 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5418 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5420 mgmt_pending_remove(cmd);
5423 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5424 u8 link_type, u8 addr_type, u8 reason,
5425 bool mgmt_connected)
5427 struct mgmt_ev_device_disconnected ev;
5428 struct pending_cmd *power_off;
5429 struct sock *sk = NULL;
5431 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5432 if (power_off) {
5433 struct mgmt_mode *cp = power_off->param;
5435 /* The connection is still in hci_conn_hash so test for 1
5436 * instead of 0 to know if this is the last one.
5438 if (!cp->val && hci_conn_count(hdev) == 1) {
5439 cancel_delayed_work(&hdev->power_off);
5440 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5444 if (!mgmt_connected)
5445 return;
5447 if (link_type != ACL_LINK && link_type != LE_LINK)
5448 return;
5450 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5452 bacpy(&ev.addr.bdaddr, bdaddr);
5453 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5454 ev.reason = reason;
5456 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5458 if (sk)
5459 sock_put(sk);
5461 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5462 hdev);
5465 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5466 u8 link_type, u8 addr_type, u8 status)
5468 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5469 struct mgmt_cp_disconnect *cp;
5470 struct mgmt_rp_disconnect rp;
5471 struct pending_cmd *cmd;
5473 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5474 hdev);
5476 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5477 if (!cmd)
5478 return;
5480 cp = cmd->param;
5482 if (bacmp(bdaddr, &cp->addr.bdaddr))
5483 return;
5485 if (cp->addr.type != bdaddr_type)
5486 return;
5488 bacpy(&rp.addr.bdaddr, bdaddr);
5489 rp.addr.type = bdaddr_type;
5491 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5492 mgmt_status(status), &rp, sizeof(rp));
5494 mgmt_pending_remove(cmd);
5497 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5498 u8 addr_type, u8 status)
5500 struct mgmt_ev_connect_failed ev;
5501 struct pending_cmd *power_off;
5503 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5504 if (power_off) {
5505 struct mgmt_mode *cp = power_off->param;
5507 /* The connection is still in hci_conn_hash so test for 1
5508 * instead of 0 to know if this is the last one.
5510 if (!cp->val && hci_conn_count(hdev) == 1) {
5511 cancel_delayed_work(&hdev->power_off);
5512 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5516 bacpy(&ev.addr.bdaddr, bdaddr);
5517 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5518 ev.status = mgmt_status(status);
5520 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5523 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5525 struct mgmt_ev_pin_code_request ev;
5527 bacpy(&ev.addr.bdaddr, bdaddr);
5528 ev.addr.type = BDADDR_BREDR;
5529 ev.secure = secure;
5531 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5534 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5535 u8 status)
5537 struct pending_cmd *cmd;
5538 struct mgmt_rp_pin_code_reply rp;
5540 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5541 if (!cmd)
5542 return;
5544 bacpy(&rp.addr.bdaddr, bdaddr);
5545 rp.addr.type = BDADDR_BREDR;
5547 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5548 mgmt_status(status), &rp, sizeof(rp));
5550 mgmt_pending_remove(cmd);
5553 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5554 u8 status)
5556 struct pending_cmd *cmd;
5557 struct mgmt_rp_pin_code_reply rp;
5559 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5560 if (!cmd)
5561 return;
5563 bacpy(&rp.addr.bdaddr, bdaddr);
5564 rp.addr.type = BDADDR_BREDR;
5566 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5567 mgmt_status(status), &rp, sizeof(rp));
5569 mgmt_pending_remove(cmd);
5572 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5573 u8 link_type, u8 addr_type, u32 value,
5574 u8 confirm_hint)
5576 struct mgmt_ev_user_confirm_request ev;
5578 BT_DBG("%s", hdev->name);
5580 bacpy(&ev.addr.bdaddr, bdaddr);
5581 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5582 ev.confirm_hint = confirm_hint;
5583 ev.value = cpu_to_le32(value);
5585 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5586 NULL);
5589 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5590 u8 link_type, u8 addr_type)
5592 struct mgmt_ev_user_passkey_request ev;
5594 BT_DBG("%s", hdev->name);
5596 bacpy(&ev.addr.bdaddr, bdaddr);
5597 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5599 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5600 NULL);
5603 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5604 u8 link_type, u8 addr_type, u8 status,
5605 u8 opcode)
5607 struct pending_cmd *cmd;
5608 struct mgmt_rp_user_confirm_reply rp;
5609 int err;
5611 cmd = mgmt_pending_find(opcode, hdev);
5612 if (!cmd)
5613 return -ENOENT;
5615 bacpy(&rp.addr.bdaddr, bdaddr);
5616 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5617 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5618 &rp, sizeof(rp));
5620 mgmt_pending_remove(cmd);
5622 return err;
5625 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5626 u8 link_type, u8 addr_type, u8 status)
5628 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5629 status, MGMT_OP_USER_CONFIRM_REPLY);
5632 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5633 u8 link_type, u8 addr_type, u8 status)
5635 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5636 status,
5637 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5640 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5641 u8 link_type, u8 addr_type, u8 status)
5643 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5644 status, MGMT_OP_USER_PASSKEY_REPLY);
5647 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5648 u8 link_type, u8 addr_type, u8 status)
5650 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5651 status,
5652 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5655 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5656 u8 link_type, u8 addr_type, u32 passkey,
5657 u8 entered)
5659 struct mgmt_ev_passkey_notify ev;
5661 BT_DBG("%s", hdev->name);
5663 bacpy(&ev.addr.bdaddr, bdaddr);
5664 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5665 ev.passkey = __cpu_to_le32(passkey);
5666 ev.entered = entered;
5668 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5671 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5672 u8 addr_type, u8 status)
5674 struct mgmt_ev_auth_failed ev;
5676 bacpy(&ev.addr.bdaddr, bdaddr);
5677 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5678 ev.status = mgmt_status(status);
5680 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5683 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5685 struct cmd_lookup match = { NULL, hdev };
5686 bool changed;
5688 if (status) {
5689 u8 mgmt_err = mgmt_status(status);
5690 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5691 cmd_status_rsp, &mgmt_err);
5692 return;
5695 if (test_bit(HCI_AUTH, &hdev->flags))
5696 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5697 &hdev->dev_flags);
5698 else
5699 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5700 &hdev->dev_flags);
5702 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5703 &match);
5705 if (changed)
5706 new_settings(hdev, match.sk);
5708 if (match.sk)
5709 sock_put(match.sk);
5712 static void clear_eir(struct hci_request *req)
5714 struct hci_dev *hdev = req->hdev;
5715 struct hci_cp_write_eir cp;
5717 if (!lmp_ext_inq_capable(hdev))
5718 return;
5720 memset(hdev->eir, 0, sizeof(hdev->eir));
5722 memset(&cp, 0, sizeof(cp));
5724 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5727 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5729 struct cmd_lookup match = { NULL, hdev };
5730 struct hci_request req;
5731 bool changed = false;
5733 if (status) {
5734 u8 mgmt_err = mgmt_status(status);
5736 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5737 &hdev->dev_flags)) {
5738 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5739 new_settings(hdev, NULL);
5742 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5743 &mgmt_err);
5744 return;
5747 if (enable) {
5748 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5749 } else {
5750 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5751 if (!changed)
5752 changed = test_and_clear_bit(HCI_HS_ENABLED,
5753 &hdev->dev_flags);
5754 else
5755 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5758 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5760 if (changed)
5761 new_settings(hdev, match.sk);
5763 if (match.sk)
5764 sock_put(match.sk);
5766 hci_req_init(&req, hdev);
5768 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5769 update_eir(&req);
5770 else
5771 clear_eir(&req);
5773 hci_req_run(&req, NULL);
5776 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5778 struct cmd_lookup match = { NULL, hdev };
5779 bool changed = false;
5781 if (status) {
5782 u8 mgmt_err = mgmt_status(status);
5784 if (enable) {
5785 if (test_and_clear_bit(HCI_SC_ENABLED,
5786 &hdev->dev_flags))
5787 new_settings(hdev, NULL);
5788 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5791 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5792 cmd_status_rsp, &mgmt_err);
5793 return;
5796 if (enable) {
5797 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5798 } else {
5799 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5800 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5803 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5804 settings_rsp, &match);
5806 if (changed)
5807 new_settings(hdev, match.sk);
5809 if (match.sk)
5810 sock_put(match.sk);
5813 static void sk_lookup(struct pending_cmd *cmd, void *data)
5815 struct cmd_lookup *match = data;
5817 if (match->sk == NULL) {
5818 match->sk = cmd->sk;
5819 sock_hold(match->sk);
5823 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5824 u8 status)
5826 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5828 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5829 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5830 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5832 if (!status)
5833 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5834 NULL);
5836 if (match.sk)
5837 sock_put(match.sk);
5840 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5842 struct mgmt_cp_set_local_name ev;
5843 struct pending_cmd *cmd;
5845 if (status)
5846 return;
5848 memset(&ev, 0, sizeof(ev));
5849 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5850 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5852 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5853 if (!cmd) {
5854 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5856 /* If this is a HCI command related to powering on the
5857 * HCI dev don't send any mgmt signals.
5859 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5860 return;
5863 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5864 cmd ? cmd->sk : NULL);
5867 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5868 u8 *randomizer192, u8 *hash256,
5869 u8 *randomizer256, u8 status)
5871 struct pending_cmd *cmd;
5873 BT_DBG("%s status %u", hdev->name, status);
5875 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5876 if (!cmd)
5877 return;
5879 if (status) {
5880 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5881 mgmt_status(status));
5882 } else {
5883 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5884 hash256 && randomizer256) {
5885 struct mgmt_rp_read_local_oob_ext_data rp;
5887 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5888 memcpy(rp.randomizer192, randomizer192,
5889 sizeof(rp.randomizer192));
5891 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5892 memcpy(rp.randomizer256, randomizer256,
5893 sizeof(rp.randomizer256));
5895 cmd_complete(cmd->sk, hdev->id,
5896 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5897 &rp, sizeof(rp));
5898 } else {
5899 struct mgmt_rp_read_local_oob_data rp;
5901 memcpy(rp.hash, hash192, sizeof(rp.hash));
5902 memcpy(rp.randomizer, randomizer192,
5903 sizeof(rp.randomizer));
5905 cmd_complete(cmd->sk, hdev->id,
5906 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5907 &rp, sizeof(rp));
5911 mgmt_pending_remove(cmd);
5914 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5915 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5916 u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
5917 u8 scan_rsp_len)
5919 char buf[512];
5920 struct mgmt_ev_device_found *ev = (void *) buf;
5921 struct smp_irk *irk;
5922 size_t ev_size;
5924 if (!hci_discovery_active(hdev))
5925 return;
5927 /* Make sure that the buffer is big enough. The 5 extra bytes
5928 * are for the potential CoD field.
5930 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
5931 return;
5933 memset(buf, 0, sizeof(buf));
5935 irk = hci_get_irk(hdev, bdaddr, addr_type);
5936 if (irk) {
5937 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5938 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5939 } else {
5940 bacpy(&ev->addr.bdaddr, bdaddr);
5941 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5944 ev->rssi = rssi;
5945 if (cfm_name)
5946 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5947 if (!ssp)
5948 ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5950 if (eir_len > 0)
5951 memcpy(ev->eir, eir, eir_len);
5953 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5954 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5955 dev_class, 3);
5957 if (scan_rsp_len > 0)
5958 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
5960 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
5961 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
5963 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5966 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5967 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5969 struct mgmt_ev_device_found *ev;
5970 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5971 u16 eir_len;
5973 ev = (struct mgmt_ev_device_found *) buf;
5975 memset(buf, 0, sizeof(buf));
5977 bacpy(&ev->addr.bdaddr, bdaddr);
5978 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5979 ev->rssi = rssi;
5981 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5982 name_len);
5984 ev->eir_len = cpu_to_le16(eir_len);
5986 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5989 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5991 struct mgmt_ev_discovering ev;
5992 struct pending_cmd *cmd;
5994 BT_DBG("%s discovering %u", hdev->name, discovering);
5996 if (discovering)
5997 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5998 else
5999 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6001 if (cmd != NULL) {
6002 u8 type = hdev->discovery.type;
6004 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6005 sizeof(type));
6006 mgmt_pending_remove(cmd);
6009 memset(&ev, 0, sizeof(ev));
6010 ev.type = hdev->discovery.type;
6011 ev.discovering = discovering;
6013 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6016 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6018 struct pending_cmd *cmd;
6019 struct mgmt_ev_device_blocked ev;
6021 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6023 bacpy(&ev.addr.bdaddr, bdaddr);
6024 ev.addr.type = type;
6026 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6027 cmd ? cmd->sk : NULL);
6030 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6032 struct pending_cmd *cmd;
6033 struct mgmt_ev_device_unblocked ev;
6035 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6037 bacpy(&ev.addr.bdaddr, bdaddr);
6038 ev.addr.type = type;
6040 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6041 cmd ? cmd->sk : NULL);
6044 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6046 BT_DBG("%s status %u", hdev->name, status);
6048 /* Clear the advertising mgmt setting if we failed to re-enable it */
6049 if (status) {
6050 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6051 new_settings(hdev, NULL);
6055 void mgmt_reenable_advertising(struct hci_dev *hdev)
6057 struct hci_request req;
6059 if (hci_conn_num(hdev, LE_LINK) > 0)
6060 return;
6062 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6063 return;
6065 hci_req_init(&req, hdev);
6066 enable_advertising(&req);
6068 /* If this fails we have no option but to let user space know
6069 * that we've disabled advertising.
6071 if (hci_req_run(&req, adv_enable_complete) < 0) {
6072 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6073 new_settings(hdev, NULL);