/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	8
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
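/* Illustrative note (not part of the original file): user space discovers
 * which of the opcodes and events above a given kernel supports by sending
 * MGMT_OP_READ_COMMANDS on the HCI control socket and parsing the
 * little-endian u16 lists that read_commands() below places in the reply.
 */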
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	size_t param_len;
	struct sock *sk;
	void *user_data;
	int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}
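/* Example of the table above: HCI status 0x05 ("Authentication Failure")
 * sits at index 5, so mgmt_status(0x05) yields MGMT_STATUS_AUTH_FAILED,
 * while any status beyond the end of the table falls back to
 * MGMT_STATUS_FAILED.
 */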
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
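/* Rough wire layout of the reply built above (assuming the mgmt_hdr and
 * mgmt_ev_cmd_complete definitions from <net/bluetooth/mgmt.h>):
 *
 *   mgmt_hdr:          opcode=MGMT_EV_CMD_COMPLETE | index | len
 *   cmd_complete body: opcode of the original command | status | rp_len bytes
 *
 * cmd_status() above differs only in that it carries a status byte and no
 * parameter payload.
 */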
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return cmd_complete(sk, hdev->id, opcode, 0, &options,
			    sizeof(options));
}
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
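/* Example (illustrative): a powered, connectable BR/EDR controller with SSP
 * enabled would report roughly
 *   MGMT_SETTING_POWERED | MGMT_SETTING_CONNECTABLE |
 *   MGMT_SETTING_BREDR | MGMT_SETTING_SSP
 * from get_current_settings(); user space compares this bitmask against
 * get_supported_settings() to decide which switches it may still toggle.
 */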
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
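/* Example of the EIR/AD block emitted by create_uuid16_list() above: the two
 * 16-bit UUIDs 0x110b and 0x110c would be encoded as
 *   05 03 0b 11 0c 11
 * i.e. length 5, type EIR_UUID16_ALL (0x03), then the UUIDs little endian;
 * the type is downgraded to EIR_UUID16_SOME when the list has to be
 * truncated for lack of space.
 */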
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->user_data != data)
			continue;
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}

static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
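/* Example (illustrative): after a local name change, the start of the EIR
 * buffer built by create_eir() for the name "hci0" would read
 *   05 09 'h' 'c' 'i' '0'
 * i.e. length 5, type EIR_NAME_COMPLETE (0x09), then the UTF-8 name,
 * followed by TX power, Device ID and UUID list fields when those are valid.
 */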
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static bool get_connectable(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		return cp->val;
	}

	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
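/* Note on the parameters chosen above: when the controller is connectable
 * the advertising type is LE_ADV_IND (connectable undirected), otherwise
 * LE_ADV_NONCONN_IND, and only in the non-connectable case may a
 * non-resolvable private address be used, which is why !connectable is
 * passed as the require_privacy argument to hci_update_random_address().
 */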
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmemdup(data, len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	cmd->param_len = len;

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}

static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
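/* For reference: the reason 0x15 used above is the Bluetooth error code for
 * a connection terminated because of power off, which matches the power-down
 * clean-up this request batch performs.
 */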
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
			    sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;

	return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;

	return MGMT_STATUS_SUCCESS;
}
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
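/* Example of get_uuid_size() above: the 128-bit form of the 16-bit UUID
 * 0x180d (Heart Rate) is 0000180d-0000-1000-8000-00805f9b34fb; stored in the
 * little-endian byte order used here, its first 12 bytes match
 * bluetooth_base_uuid and bytes 12-15 hold 0x0000180d, so the function
 * reports a size of 16.
 */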
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2564 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2567 struct mgmt_cp_set_dev_class
*cp
= data
;
2568 struct pending_cmd
*cmd
;
2569 struct hci_request req
;
2572 BT_DBG("request for %s", hdev
->name
);
2574 if (!lmp_bredr_capable(hdev
))
2575 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2576 MGMT_STATUS_NOT_SUPPORTED
);
2580 if (pending_eir_or_class(hdev
)) {
2581 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2586 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2587 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2588 MGMT_STATUS_INVALID_PARAMS
);
2592 hdev
->major_class
= cp
->major
;
2593 hdev
->minor_class
= cp
->minor
;
2595 if (!hdev_is_powered(hdev
)) {
2596 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2597 hdev
->dev_class
, 3);
2601 hci_req_init(&req
, hdev
);
2603 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2604 hci_dev_unlock(hdev
);
2605 cancel_delayed_work_sync(&hdev
->service_cache
);
2612 err
= hci_req_run(&req
, set_class_complete
);
2614 if (err
!= -ENODATA
)
2617 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2618 hdev
->dev_class
, 3);
2622 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2631 hci_dev_unlock(hdev
);
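/* Load Link Keys: the payload length must match exactly, i.e.
 * len == sizeof(*cp) + key_count * sizeof(struct mgmt_link_key_info),
 * and key_count itself is capped so that the product cannot exceed the
 * u16 packet length. Keys of type HCI_LK_DEBUG_COMBINATION are skipped
 * so that debug pairings always have to be redone.
 */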
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
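/* Unpair Device: link keys (BR/EDR) or IRK/LTK entries (LE) are removed
 * first; the connection is only looked up when the caller asked for a
 * disconnect, in which case an HCI_OP_DISCONNECT with reason 0x13 is sent
 * and the reply is deferred through a pending command.
 */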
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
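/* Get Connections: the connection hash is walked twice, once to size the
 * reply and once to fill it; SCO/eSCO links are filtered out on the second
 * pass, so rp_len is recalculated before the reply is sent.
 */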
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}
static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static int pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
			   &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
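/* The five reply handlers below only differ in the mgmt/HCI opcode pair
 * (and the passkey for MGMT_OP_USER_PASSKEY_REPLY); they all delegate to
 * user_pairing_resp() above.
 */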
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS, addr,
				    sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_ADD_REMOTE_OOB_DATA,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_ADD_REMOTE_OOB_DATA,
						   MGMT_STATUS_INVALID_PARAMS,
						   addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id,
				    MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
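/* trigger_discovery() appends the HCI commands needed for the requested
 * discovery type: an Inquiry with the GIAC LAP (0x9e8b33) for BR/EDR, or
 * an active LE scan (with advertising and background scanning temporarily
 * disabled and a resolvable or non-resolvable private address selected)
 * for LE and interleaved discovery. It returns false and sets *status
 * when the request cannot be built.
 */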
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
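/* start_discovery_complete() picks the LE scan timeout based on the
 * discovery type (DISCOV_LE_TIMEOUT for pure LE, the interleaved timeout
 * otherwise) and schedules le_scan_disable; for controllers with a strict
 * duplicate filter the scan start and duration are remembered so filtered
 * service discovery can restart scanning later.
 */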
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    (hdev->discovery.uuid_count > 0 ||
		     hdev->discovery.rssi != HCI_RSSI_INVALID)) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
			    cmd->param, 1);
}
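/* Start Service Discovery: uuid_count is validated against the largest
 * value that still fits a u16-sized payload and the exact length check is
 * expected_len = sizeof(*cp) + uuid_count * 16. The UUID filter is copied
 * with kmemdup() into hdev->discovery before the scan is triggered.
 */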
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_SERVICE_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = cmd_complete(sk, hdev->id,
				   MGMT_OP_START_SERVICE_DISCOVERY,
				   status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL,
			   0);

	hci_dev_unlock(hdev);

	return err;
}
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
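/* Set BR/EDR: disabling BR/EDR on a powered controller is rejected, and
 * re-enabling it is refused for an LE-only setup that relies on a static
 * address or has Secure Connections enabled. The HCI_BREDR_ENABLED flag
 * is flipped before building the request so update_adv_data() emits the
 * right advertising flags; set_bredr_complete() restores it on failure.
 */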
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, cmd->index, cmd->opcode,
			   mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		break;
	case 0x01:
		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		break;
	case 0x02:
		set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
	    lmp_sc_capable(hdev) &&
	    !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}
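/* Load IRKs: identical size validation pattern as the other load
 * commands (irk_count capped by what fits in a u16 payload and
 * expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info)),
 * followed by clearing and repopulating the SMP IRK list and setting
 * HCI_RPA_RESOLVING.
 */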
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}
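/* Load Long Term Keys: each mgmt LTK type is mapped to an SMP key type
 * and authentication level (MGMT_LTK_UNAUTHENTICATED/AUTHENTICATED map to
 * SMP_LTK or SMP_LTK_SLAVE depending on the master flag, the P-256 types
 * map to SMP_LTK_P256), and debug keys are skipped rather than imported.
 */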
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
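/* Summary of the key-type mapping in load_long_term_keys(): the legacy
 * MGMT_LTK_UNAUTHENTICATED and MGMT_LTK_AUTHENTICATED types select
 * SMP_LTK or SMP_LTK_SLAVE depending on key->master, while the P256
 * variants map to SMP_LTK_P256 and SMP_LTK_P256_DEBUG. The authenticated
 * flag records whether the key came from an authenticated pairing.
 */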
static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			   &rp, sizeof(rp));

	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}

static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			   sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}

static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
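/* The auto_connect policy decides which action list the parameters end up
 * on: HCI_AUTO_CONN_DISABLED and HCI_AUTO_CONN_LINK_LOSS stay off both
 * lists, HCI_AUTO_CONN_REPORT goes to pend_le_reports (device found events
 * only), and HCI_AUTO_CONN_DIRECT/ALWAYS go to pend_le_conns so the
 * background scan can establish a connection, unless one already exists.
 */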
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
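/* Action values accepted by Add Device: 0x00 scan and report only
 * (HCI_AUTO_CONN_REPORT), 0x01 allow incoming connection for BR/EDR or
 * direct connect for LE (HCI_AUTO_CONN_DIRECT), and 0x02 auto-connect
 * whenever the device is seen (HCI_AUTO_CONN_ALWAYS).
 */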
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
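/* Passing BDADDR_ANY with address type 0x00 to Remove Device clears every
 * whitelist entry and every non-disabled LE connection parameter in a
 * single call, as handled by the else branch above.
 */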
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_bondable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	{ set_privacy, false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery, true, MGMT_START_SERVICE_DISCOVERY_SIZE },
};
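/* Handlers marked with var_len accept commands whose payload is at least
 * data_len bytes long (the remainder being a variable-length list of
 * entries), while fixed-size handlers require an exact length match. The
 * corresponding check lives in mgmt_control() below.
 */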
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	kfree(buf);

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
	else
		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
	else
		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	__hci_update_background_scan(req);
}

static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Register the available SMP channels (BR/EDR and LE) only
	 * when successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly
	 * decide if the public address or static address is used.
	 */
	smp_register(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
	}

	return hci_req_run(&req, powered_complete);
}
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0,
			     &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
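/* 16-bit and 32-bit UUIDs found in the EIR data are expanded to full
 * 128-bit form by overwriting the relevant bytes of the Bluetooth Base
 * UUID before comparison, so has_uuid() always operates on 128-bit values.
 */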
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set,
	 * then all results with a RSSI smaller than the RSSI threshold will be
	 * dropped. If the quirk is set, let it through for further processing,
	 * as we might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0) {
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
					      &hdev->quirks))
				restart_le_scan(hdev);
		} else {
			match = true;
		}

		if (!match && !scan_rsp_len)
			return;

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
			return;

		match = false;
	}

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;

			/* If duplicate filtering does not report RSSI changes,
			 * then restart scanning to ensure updated result with
			 * updated RSSI values.
			 */
			if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
				     &hdev->quirks))
				restart_le_scan(hdev);
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	/* Validate the reported RSSI value against the RSSI threshold once more
	 * in case HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE
	 * scanning.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    rssi < hdev->discovery.rssi)
		return;

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);