2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands
[] = {
41 MGMT_OP_READ_INDEX_LIST
,
44 MGMT_OP_SET_DISCOVERABLE
,
45 MGMT_OP_SET_CONNECTABLE
,
46 MGMT_OP_SET_FAST_CONNECTABLE
,
48 MGMT_OP_SET_LINK_SECURITY
,
52 MGMT_OP_SET_DEV_CLASS
,
53 MGMT_OP_SET_LOCAL_NAME
,
56 MGMT_OP_LOAD_LINK_KEYS
,
57 MGMT_OP_LOAD_LONG_TERM_KEYS
,
59 MGMT_OP_GET_CONNECTIONS
,
60 MGMT_OP_PIN_CODE_REPLY
,
61 MGMT_OP_PIN_CODE_NEG_REPLY
,
62 MGMT_OP_SET_IO_CAPABILITY
,
64 MGMT_OP_CANCEL_PAIR_DEVICE
,
65 MGMT_OP_UNPAIR_DEVICE
,
66 MGMT_OP_USER_CONFIRM_REPLY
,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
68 MGMT_OP_USER_PASSKEY_REPLY
,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
70 MGMT_OP_READ_LOCAL_OOB_DATA
,
71 MGMT_OP_ADD_REMOTE_OOB_DATA
,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
73 MGMT_OP_START_DISCOVERY
,
74 MGMT_OP_STOP_DISCOVERY
,
77 MGMT_OP_UNBLOCK_DEVICE
,
78 MGMT_OP_SET_DEVICE_ID
,
79 MGMT_OP_SET_ADVERTISING
,
81 MGMT_OP_SET_STATIC_ADDRESS
,
82 MGMT_OP_SET_SCAN_PARAMS
,
83 MGMT_OP_SET_SECURE_CONN
,
84 MGMT_OP_SET_DEBUG_KEYS
,
87 MGMT_OP_GET_CONN_INFO
,
88 MGMT_OP_GET_CLOCK_INFO
,
90 MGMT_OP_REMOVE_DEVICE
,
91 MGMT_OP_LOAD_CONN_PARAM
,
92 MGMT_OP_READ_UNCONF_INDEX_LIST
,
93 MGMT_OP_READ_CONFIG_INFO
,
94 MGMT_OP_SET_EXTERNAL_CONFIG
,
95 MGMT_OP_SET_PUBLIC_ADDRESS
,
98 static const u16 mgmt_events
[] = {
99 MGMT_EV_CONTROLLER_ERROR
,
101 MGMT_EV_INDEX_REMOVED
,
102 MGMT_EV_NEW_SETTINGS
,
103 MGMT_EV_CLASS_OF_DEV_CHANGED
,
104 MGMT_EV_LOCAL_NAME_CHANGED
,
105 MGMT_EV_NEW_LINK_KEY
,
106 MGMT_EV_NEW_LONG_TERM_KEY
,
107 MGMT_EV_DEVICE_CONNECTED
,
108 MGMT_EV_DEVICE_DISCONNECTED
,
109 MGMT_EV_CONNECT_FAILED
,
110 MGMT_EV_PIN_CODE_REQUEST
,
111 MGMT_EV_USER_CONFIRM_REQUEST
,
112 MGMT_EV_USER_PASSKEY_REQUEST
,
114 MGMT_EV_DEVICE_FOUND
,
116 MGMT_EV_DEVICE_BLOCKED
,
117 MGMT_EV_DEVICE_UNBLOCKED
,
118 MGMT_EV_DEVICE_UNPAIRED
,
119 MGMT_EV_PASSKEY_NOTIFY
,
122 MGMT_EV_DEVICE_ADDED
,
123 MGMT_EV_DEVICE_REMOVED
,
124 MGMT_EV_NEW_CONN_PARAM
,
125 MGMT_EV_UNCONF_INDEX_ADDED
,
126 MGMT_EV_UNCONF_INDEX_REMOVED
,
127 MGMT_EV_NEW_CONFIG_OPTIONS
,
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
136 struct list_head list
;
144 /* HCI to MGMT error code conversion table */
145 static u8 mgmt_status_table
[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
149 MGMT_STATUS_FAILED
, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
154 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY
, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED
, /* Rejected Security */
161 MGMT_STATUS_REJECTED
, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
169 MGMT_STATUS_BUSY
, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED
, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED
, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED
, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED
, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY
, /* Role Switch Pending */
195 MGMT_STATUS_FAILED
, /* Slot Violation */
196 MGMT_STATUS_FAILED
, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY
, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
209 static u8
mgmt_status(u8 hci_status
)
211 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
212 return mgmt_status_table
[hci_status
];
214 return MGMT_STATUS_FAILED
;
217 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
218 struct sock
*skip_sk
)
221 struct mgmt_hdr
*hdr
;
223 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
227 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
228 hdr
->opcode
= cpu_to_le16(event
);
230 hdr
->index
= cpu_to_le16(hdev
->id
);
232 hdr
->index
= cpu_to_le16(MGMT_INDEX_NONE
);
233 hdr
->len
= cpu_to_le16(data_len
);
236 memcpy(skb_put(skb
, data_len
), data
, data_len
);
239 __net_timestamp(skb
);
241 hci_send_to_control(skb
, skip_sk
);
247 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
250 struct mgmt_hdr
*hdr
;
251 struct mgmt_ev_cmd_status
*ev
;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
256 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
260 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
262 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_STATUS
);
263 hdr
->index
= cpu_to_le16(index
);
264 hdr
->len
= cpu_to_le16(sizeof(*ev
));
266 ev
= (void *) skb_put(skb
, sizeof(*ev
));
268 ev
->opcode
= cpu_to_le16(cmd
);
270 err
= sock_queue_rcv_skb(sk
, skb
);
277 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
278 void *rp
, size_t rp_len
)
281 struct mgmt_hdr
*hdr
;
282 struct mgmt_ev_cmd_complete
*ev
;
285 BT_DBG("sock %p", sk
);
287 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
291 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
293 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
294 hdr
->index
= cpu_to_le16(index
);
295 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
297 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
298 ev
->opcode
= cpu_to_le16(cmd
);
302 memcpy(ev
->data
, rp
, rp_len
);
304 err
= sock_queue_rcv_skb(sk
, skb
);
311 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
314 struct mgmt_rp_read_version rp
;
316 BT_DBG("sock %p", sk
);
318 rp
.version
= MGMT_VERSION
;
319 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
321 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
325 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
328 struct mgmt_rp_read_commands
*rp
;
329 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
330 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
335 BT_DBG("sock %p", sk
);
337 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
339 rp
= kmalloc(rp_size
, GFP_KERNEL
);
343 rp
->num_commands
= cpu_to_le16(num_commands
);
344 rp
->num_events
= cpu_to_le16(num_events
);
346 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
347 put_unaligned_le16(mgmt_commands
[i
], opcode
);
349 for (i
= 0; i
< num_events
; i
++, opcode
++)
350 put_unaligned_le16(mgmt_events
[i
], opcode
);
352 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
359 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
362 struct mgmt_rp_read_index_list
*rp
;
368 BT_DBG("sock %p", sk
);
370 read_lock(&hci_dev_list_lock
);
373 list_for_each_entry(d
, &hci_dev_list
, list
) {
374 if (d
->dev_type
== HCI_BREDR
&&
375 !test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
))
379 rp_len
= sizeof(*rp
) + (2 * count
);
380 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
382 read_unlock(&hci_dev_list_lock
);
387 list_for_each_entry(d
, &hci_dev_list
, list
) {
388 if (test_bit(HCI_SETUP
, &d
->dev_flags
) ||
389 test_bit(HCI_CONFIG
, &d
->dev_flags
) ||
390 test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
399 if (d
->dev_type
== HCI_BREDR
&&
400 !test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
)) {
401 rp
->index
[count
++] = cpu_to_le16(d
->id
);
402 BT_DBG("Added hci%u", d
->id
);
406 rp
->num_controllers
= cpu_to_le16(count
);
407 rp_len
= sizeof(*rp
) + (2 * count
);
409 read_unlock(&hci_dev_list_lock
);
411 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
419 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
420 void *data
, u16 data_len
)
422 struct mgmt_rp_read_unconf_index_list
*rp
;
428 BT_DBG("sock %p", sk
);
430 read_lock(&hci_dev_list_lock
);
433 list_for_each_entry(d
, &hci_dev_list
, list
) {
434 if (d
->dev_type
== HCI_BREDR
&&
435 test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
))
439 rp_len
= sizeof(*rp
) + (2 * count
);
440 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
442 read_unlock(&hci_dev_list_lock
);
447 list_for_each_entry(d
, &hci_dev_list
, list
) {
448 if (test_bit(HCI_SETUP
, &d
->dev_flags
) ||
449 test_bit(HCI_CONFIG
, &d
->dev_flags
) ||
450 test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
459 if (d
->dev_type
== HCI_BREDR
&&
460 test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
)) {
461 rp
->index
[count
++] = cpu_to_le16(d
->id
);
462 BT_DBG("Added hci%u", d
->id
);
466 rp
->num_controllers
= cpu_to_le16(count
);
467 rp_len
= sizeof(*rp
) + (2 * count
);
469 read_unlock(&hci_dev_list_lock
);
471 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_UNCONF_INDEX_LIST
,
479 static bool is_configured(struct hci_dev
*hdev
)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
482 !test_bit(HCI_EXT_CONFIGURED
, &hdev
->dev_flags
))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
486 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
492 static __le32
get_missing_options(struct hci_dev
*hdev
)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
497 !test_bit(HCI_EXT_CONFIGURED
, &hdev
->dev_flags
))
498 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
501 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
502 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
504 return cpu_to_le32(options
);
507 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
509 __le32 options
= get_missing_options(hdev
);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
512 sizeof(options
), skip
);
515 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
517 __le32 options
= get_missing_options(hdev
);
519 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
523 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
524 void *data
, u16 data_len
)
526 struct mgmt_rp_read_config_info rp
;
529 BT_DBG("sock %p %s", sk
, hdev
->name
);
533 memset(&rp
, 0, sizeof(rp
));
534 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
537 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
539 if (hdev
->set_bdaddr
)
540 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
542 rp
.supported_options
= cpu_to_le32(options
);
543 rp
.missing_options
= get_missing_options(hdev
);
545 hci_dev_unlock(hdev
);
547 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0, &rp
,
551 static u32
get_supported_settings(struct hci_dev
*hdev
)
555 settings
|= MGMT_SETTING_POWERED
;
556 settings
|= MGMT_SETTING_BONDABLE
;
557 settings
|= MGMT_SETTING_DEBUG_KEYS
;
558 settings
|= MGMT_SETTING_CONNECTABLE
;
559 settings
|= MGMT_SETTING_DISCOVERABLE
;
561 if (lmp_bredr_capable(hdev
)) {
562 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
563 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
564 settings
|= MGMT_SETTING_BREDR
;
565 settings
|= MGMT_SETTING_LINK_SECURITY
;
567 if (lmp_ssp_capable(hdev
)) {
568 settings
|= MGMT_SETTING_SSP
;
569 settings
|= MGMT_SETTING_HS
;
572 if (lmp_sc_capable(hdev
) ||
573 test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
574 settings
|= MGMT_SETTING_SECURE_CONN
;
577 if (lmp_le_capable(hdev
)) {
578 settings
|= MGMT_SETTING_LE
;
579 settings
|= MGMT_SETTING_ADVERTISING
;
580 settings
|= MGMT_SETTING_PRIVACY
;
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
585 settings
|= MGMT_SETTING_CONFIGURATION
;
590 static u32
get_current_settings(struct hci_dev
*hdev
)
594 if (hdev_is_powered(hdev
))
595 settings
|= MGMT_SETTING_POWERED
;
597 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
598 settings
|= MGMT_SETTING_CONNECTABLE
;
600 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
601 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
603 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
604 settings
|= MGMT_SETTING_DISCOVERABLE
;
606 if (test_bit(HCI_BONDABLE
, &hdev
->dev_flags
))
607 settings
|= MGMT_SETTING_BONDABLE
;
609 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
610 settings
|= MGMT_SETTING_BREDR
;
612 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
613 settings
|= MGMT_SETTING_LE
;
615 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
616 settings
|= MGMT_SETTING_LINK_SECURITY
;
618 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
619 settings
|= MGMT_SETTING_SSP
;
621 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
622 settings
|= MGMT_SETTING_HS
;
624 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
625 settings
|= MGMT_SETTING_ADVERTISING
;
627 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
628 settings
|= MGMT_SETTING_SECURE_CONN
;
630 if (test_bit(HCI_KEEP_DEBUG_KEYS
, &hdev
->dev_flags
))
631 settings
|= MGMT_SETTING_DEBUG_KEYS
;
633 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
634 settings
|= MGMT_SETTING_PRIVACY
;
639 #define PNP_INFO_SVCLASS_ID 0x1200
641 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
643 u8
*ptr
= data
, *uuids_start
= NULL
;
644 struct bt_uuid
*uuid
;
649 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
652 if (uuid
->size
!= 16)
655 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
659 if (uuid16
== PNP_INFO_SVCLASS_ID
)
665 uuids_start
[1] = EIR_UUID16_ALL
;
669 /* Stop if not enough space to put next UUID */
670 if ((ptr
- data
) + sizeof(u16
) > len
) {
671 uuids_start
[1] = EIR_UUID16_SOME
;
675 *ptr
++ = (uuid16
& 0x00ff);
676 *ptr
++ = (uuid16
& 0xff00) >> 8;
677 uuids_start
[0] += sizeof(uuid16
);
683 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
685 u8
*ptr
= data
, *uuids_start
= NULL
;
686 struct bt_uuid
*uuid
;
691 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
692 if (uuid
->size
!= 32)
698 uuids_start
[1] = EIR_UUID32_ALL
;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr
- data
) + sizeof(u32
) > len
) {
704 uuids_start
[1] = EIR_UUID32_SOME
;
708 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
710 uuids_start
[0] += sizeof(u32
);
716 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
718 u8
*ptr
= data
, *uuids_start
= NULL
;
719 struct bt_uuid
*uuid
;
724 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
725 if (uuid
->size
!= 128)
731 uuids_start
[1] = EIR_UUID128_ALL
;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr
- data
) + 16 > len
) {
737 uuids_start
[1] = EIR_UUID128_SOME
;
741 memcpy(ptr
, uuid
->uuid
, 16);
743 uuids_start
[0] += 16;
749 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
751 struct pending_cmd
*cmd
;
753 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
754 if (cmd
->opcode
== opcode
)
761 static struct pending_cmd
*mgmt_pending_find_data(u16 opcode
,
762 struct hci_dev
*hdev
,
765 struct pending_cmd
*cmd
;
767 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
768 if (cmd
->user_data
!= data
)
770 if (cmd
->opcode
== opcode
)
777 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
782 name_len
= strlen(hdev
->dev_name
);
784 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
786 if (name_len
> max_len
) {
788 ptr
[1] = EIR_NAME_SHORT
;
790 ptr
[1] = EIR_NAME_COMPLETE
;
792 ptr
[0] = name_len
+ 1;
794 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
796 ad_len
+= (name_len
+ 2);
797 ptr
+= (name_len
+ 2);
803 static void update_scan_rsp_data(struct hci_request
*req
)
805 struct hci_dev
*hdev
= req
->hdev
;
806 struct hci_cp_le_set_scan_rsp_data cp
;
809 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
812 memset(&cp
, 0, sizeof(cp
));
814 len
= create_scan_rsp_data(hdev
, cp
.data
);
816 if (hdev
->scan_rsp_data_len
== len
&&
817 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
820 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
821 hdev
->scan_rsp_data_len
= len
;
825 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
828 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
830 struct pending_cmd
*cmd
;
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
835 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
837 struct mgmt_mode
*cp
= cmd
->param
;
839 return LE_AD_GENERAL
;
840 else if (cp
->val
== 0x02)
841 return LE_AD_LIMITED
;
843 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
844 return LE_AD_LIMITED
;
845 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
846 return LE_AD_GENERAL
;
852 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
854 u8 ad_len
= 0, flags
= 0;
856 flags
|= get_adv_discov_flags(hdev
);
858 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
859 flags
|= LE_AD_NO_BREDR
;
862 BT_DBG("adv flags 0x%02x", flags
);
872 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
874 ptr
[1] = EIR_TX_POWER
;
875 ptr
[2] = (u8
) hdev
->adv_tx_power
;
884 static void update_adv_data(struct hci_request
*req
)
886 struct hci_dev
*hdev
= req
->hdev
;
887 struct hci_cp_le_set_adv_data cp
;
890 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
893 memset(&cp
, 0, sizeof(cp
));
895 len
= create_adv_data(hdev
, cp
.data
);
897 if (hdev
->adv_data_len
== len
&&
898 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
901 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
902 hdev
->adv_data_len
= len
;
906 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
909 int mgmt_update_adv_data(struct hci_dev
*hdev
)
911 struct hci_request req
;
913 hci_req_init(&req
, hdev
);
914 update_adv_data(&req
);
916 return hci_req_run(&req
, NULL
);
919 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
924 name_len
= strlen(hdev
->dev_name
);
930 ptr
[1] = EIR_NAME_SHORT
;
932 ptr
[1] = EIR_NAME_COMPLETE
;
934 /* EIR Data length */
935 ptr
[0] = name_len
+ 1;
937 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
939 ptr
+= (name_len
+ 2);
942 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
944 ptr
[1] = EIR_TX_POWER
;
945 ptr
[2] = (u8
) hdev
->inq_tx_power
;
950 if (hdev
->devid_source
> 0) {
952 ptr
[1] = EIR_DEVICE_ID
;
954 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
955 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
956 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
957 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
962 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
963 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
964 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
967 static void update_eir(struct hci_request
*req
)
969 struct hci_dev
*hdev
= req
->hdev
;
970 struct hci_cp_write_eir cp
;
972 if (!hdev_is_powered(hdev
))
975 if (!lmp_ext_inq_capable(hdev
))
978 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
981 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
984 memset(&cp
, 0, sizeof(cp
));
986 create_eir(hdev
, cp
.data
);
988 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
991 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
993 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
996 static u8
get_service_classes(struct hci_dev
*hdev
)
998 struct bt_uuid
*uuid
;
1001 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
1002 val
|= uuid
->svc_hint
;
1007 static void update_class(struct hci_request
*req
)
1009 struct hci_dev
*hdev
= req
->hdev
;
1012 BT_DBG("%s", hdev
->name
);
1014 if (!hdev_is_powered(hdev
))
1017 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1020 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1023 cod
[0] = hdev
->minor_class
;
1024 cod
[1] = hdev
->major_class
;
1025 cod
[2] = get_service_classes(hdev
);
1027 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
1030 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
1033 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
1036 static bool get_connectable(struct hci_dev
*hdev
)
1038 struct pending_cmd
*cmd
;
1040 /* If there's a pending mgmt command the flag will not yet have
1041 * it's final value, so check for this first.
1043 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1045 struct mgmt_mode
*cp
= cmd
->param
;
1049 return test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1052 static void disable_advertising(struct hci_request
*req
)
1056 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1059 static void enable_advertising(struct hci_request
*req
)
1061 struct hci_dev
*hdev
= req
->hdev
;
1062 struct hci_cp_le_set_adv_param cp
;
1063 u8 own_addr_type
, enable
= 0x01;
1066 if (hci_conn_num(hdev
, LE_LINK
) > 0)
1069 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
1070 disable_advertising(req
);
1072 /* Clear the HCI_LE_ADV bit temporarily so that the
1073 * hci_update_random_address knows that it's safe to go ahead
1074 * and write a new random address. The flag will be set back on
1075 * as soon as the SET_ADV_ENABLE HCI command completes.
1077 clear_bit(HCI_LE_ADV
, &hdev
->dev_flags
);
1079 connectable
= get_connectable(hdev
);
1081 /* Set require_privacy to true only when non-connectable
1082 * advertising is used. In that case it is fine to use a
1083 * non-resolvable private address.
1085 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
1088 memset(&cp
, 0, sizeof(cp
));
1089 cp
.min_interval
= cpu_to_le16(hdev
->le_adv_min_interval
);
1090 cp
.max_interval
= cpu_to_le16(hdev
->le_adv_max_interval
);
1091 cp
.type
= connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
1092 cp
.own_address_type
= own_addr_type
;
1093 cp
.channel_map
= hdev
->le_adv_channel_map
;
1095 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
1097 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1100 static void service_cache_off(struct work_struct
*work
)
1102 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1103 service_cache
.work
);
1104 struct hci_request req
;
1106 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1109 hci_req_init(&req
, hdev
);
1116 hci_dev_unlock(hdev
);
1118 hci_req_run(&req
, NULL
);
1121 static void rpa_expired(struct work_struct
*work
)
1123 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1125 struct hci_request req
;
1129 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
1131 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1134 /* The generation of a new RPA and programming it into the
1135 * controller happens in the enable_advertising() function.
1137 hci_req_init(&req
, hdev
);
1138 enable_advertising(&req
);
1139 hci_req_run(&req
, NULL
);
1142 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
1144 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
1147 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
1148 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1150 /* Non-mgmt controlled devices get this bit set
1151 * implicitly so that pairing works for them, however
1152 * for mgmt we require user-space to explicitly enable
1155 clear_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
1158 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1159 void *data
, u16 data_len
)
1161 struct mgmt_rp_read_info rp
;
1163 BT_DBG("sock %p %s", sk
, hdev
->name
);
1167 memset(&rp
, 0, sizeof(rp
));
1169 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1171 rp
.version
= hdev
->hci_ver
;
1172 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1174 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1175 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1177 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1179 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1180 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1182 hci_dev_unlock(hdev
);
1184 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1188 static void mgmt_pending_free(struct pending_cmd
*cmd
)
1195 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
1196 struct hci_dev
*hdev
, void *data
,
1199 struct pending_cmd
*cmd
;
1201 cmd
= kzalloc(sizeof(*cmd
), GFP_KERNEL
);
1205 cmd
->opcode
= opcode
;
1206 cmd
->index
= hdev
->id
;
1208 cmd
->param
= kmalloc(len
, GFP_KERNEL
);
1215 memcpy(cmd
->param
, data
, len
);
1220 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1225 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1226 void (*cb
)(struct pending_cmd
*cmd
,
1230 struct pending_cmd
*cmd
, *tmp
;
1232 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1233 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1240 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1242 list_del(&cmd
->list
);
1243 mgmt_pending_free(cmd
);
1246 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1248 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1250 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1254 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
)
1256 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1258 if (hci_conn_count(hdev
) == 0) {
1259 cancel_delayed_work(&hdev
->power_off
);
1260 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1264 static bool hci_stop_discovery(struct hci_request
*req
)
1266 struct hci_dev
*hdev
= req
->hdev
;
1267 struct hci_cp_remote_name_req_cancel cp
;
1268 struct inquiry_entry
*e
;
1270 switch (hdev
->discovery
.state
) {
1271 case DISCOVERY_FINDING
:
1272 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
1273 hci_req_add(req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
1275 cancel_delayed_work(&hdev
->le_scan_disable
);
1276 hci_req_add_le_scan_disable(req
);
1281 case DISCOVERY_RESOLVING
:
1282 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
1287 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
1288 hci_req_add(req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
1294 /* Passive scanning */
1295 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
)) {
1296 hci_req_add_le_scan_disable(req
);
1306 static int clean_up_hci_state(struct hci_dev
*hdev
)
1308 struct hci_request req
;
1309 struct hci_conn
*conn
;
1310 bool discov_stopped
;
1313 hci_req_init(&req
, hdev
);
1315 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1316 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1318 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1321 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
1322 disable_advertising(&req
);
1324 discov_stopped
= hci_stop_discovery(&req
);
1326 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1327 struct hci_cp_disconnect dc
;
1328 struct hci_cp_reject_conn_req rej
;
1330 switch (conn
->state
) {
1333 dc
.handle
= cpu_to_le16(conn
->handle
);
1334 dc
.reason
= 0x15; /* Terminated due to Power Off */
1335 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1338 if (conn
->type
== LE_LINK
)
1339 hci_req_add(&req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
1341 else if (conn
->type
== ACL_LINK
)
1342 hci_req_add(&req
, HCI_OP_CREATE_CONN_CANCEL
,
1346 bacpy(&rej
.bdaddr
, &conn
->dst
);
1347 rej
.reason
= 0x15; /* Terminated due to Power Off */
1348 if (conn
->type
== ACL_LINK
)
1349 hci_req_add(&req
, HCI_OP_REJECT_CONN_REQ
,
1351 else if (conn
->type
== SCO_LINK
)
1352 hci_req_add(&req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
1358 err
= hci_req_run(&req
, clean_up_hci_complete
);
1359 if (!err
&& discov_stopped
)
1360 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1365 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1368 struct mgmt_mode
*cp
= data
;
1369 struct pending_cmd
*cmd
;
1372 BT_DBG("request for %s", hdev
->name
);
1374 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1375 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1376 MGMT_STATUS_INVALID_PARAMS
);
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1381 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1386 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1387 cancel_delayed_work(&hdev
->power_off
);
1390 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1392 err
= mgmt_powered(hdev
, 1);
1397 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1398 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1402 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1409 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1412 /* Disconnect connections, stop scans, etc */
1413 err
= clean_up_hci_state(hdev
);
1415 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1416 HCI_POWER_OFF_TIMEOUT
);
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err
== -ENODATA
) {
1420 cancel_delayed_work(&hdev
->power_off
);
1421 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1427 hci_dev_unlock(hdev
);
1431 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1435 ev
= cpu_to_le32(get_current_settings(hdev
));
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1440 int mgmt_new_settings(struct hci_dev
*hdev
)
1442 return new_settings(hdev
, NULL
);
1447 struct hci_dev
*hdev
;
1451 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1453 struct cmd_lookup
*match
= data
;
1455 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1457 list_del(&cmd
->list
);
1459 if (match
->sk
== NULL
) {
1460 match
->sk
= cmd
->sk
;
1461 sock_hold(match
->sk
);
1464 mgmt_pending_free(cmd
);
1467 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1471 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1472 mgmt_pending_remove(cmd
);
1475 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1477 if (!lmp_bredr_capable(hdev
))
1478 return MGMT_STATUS_NOT_SUPPORTED
;
1479 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1480 return MGMT_STATUS_REJECTED
;
1482 return MGMT_STATUS_SUCCESS
;
1485 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1487 if (!lmp_le_capable(hdev
))
1488 return MGMT_STATUS_NOT_SUPPORTED
;
1489 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1490 return MGMT_STATUS_REJECTED
;
1492 return MGMT_STATUS_SUCCESS
;
1495 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1497 struct pending_cmd
*cmd
;
1498 struct mgmt_mode
*cp
;
1499 struct hci_request req
;
1502 BT_DBG("status 0x%02x", status
);
1506 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1511 u8 mgmt_err
= mgmt_status(status
);
1512 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1513 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1519 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1522 if (hdev
->discov_timeout
> 0) {
1523 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1524 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1528 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1532 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1535 new_settings(hdev
, cmd
->sk
);
1537 /* When the discoverable mode gets changed, make sure
1538 * that class of device has the limited discoverable
1539 * bit correctly set.
1541 hci_req_init(&req
, hdev
);
1543 hci_req_run(&req
, NULL
);
1546 mgmt_pending_remove(cmd
);
1549 hci_dev_unlock(hdev
);
1552 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1555 struct mgmt_cp_set_discoverable
*cp
= data
;
1556 struct pending_cmd
*cmd
;
1557 struct hci_request req
;
1562 BT_DBG("request for %s", hdev
->name
);
1564 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1565 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1566 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1567 MGMT_STATUS_REJECTED
);
1569 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1570 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1571 MGMT_STATUS_INVALID_PARAMS
);
1573 timeout
= __le16_to_cpu(cp
->timeout
);
1575 /* Disabling discoverable requires that no timeout is set,
1576 * and enabling limited discoverable requires a timeout.
1578 if ((cp
->val
== 0x00 && timeout
> 0) ||
1579 (cp
->val
== 0x02 && timeout
== 0))
1580 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1581 MGMT_STATUS_INVALID_PARAMS
);
1585 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1586 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1587 MGMT_STATUS_NOT_POWERED
);
1591 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1592 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1593 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1598 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1599 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1600 MGMT_STATUS_REJECTED
);
1604 if (!hdev_is_powered(hdev
)) {
1605 bool changed
= false;
1607 /* Setting limited discoverable when powered off is
1608 * not a valid operation since it requires a timeout
1609 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1611 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1612 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1616 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1621 err
= new_settings(hdev
, sk
);
1626 /* If the current mode is the same, then just update the timeout
1627 * value with the new value. And if only the timeout gets updated,
1628 * then no need for any HCI transactions.
1630 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1631 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1632 &hdev
->dev_flags
)) {
1633 cancel_delayed_work(&hdev
->discov_off
);
1634 hdev
->discov_timeout
= timeout
;
1636 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1637 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1638 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1642 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1646 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1652 /* Cancel any potential discoverable timeout that might be
1653 * still active and store new timeout value. The arming of
1654 * the timeout happens in the complete handler.
1656 cancel_delayed_work(&hdev
->discov_off
);
1657 hdev
->discov_timeout
= timeout
;
1659 /* Limited discoverable mode */
1660 if (cp
->val
== 0x02)
1661 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1663 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1665 hci_req_init(&req
, hdev
);
1667 /* The procedure for LE-only controllers is much simpler - just
1668 * update the advertising data.
1670 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1676 struct hci_cp_write_current_iac_lap hci_cp
;
1678 if (cp
->val
== 0x02) {
1679 /* Limited discoverable mode */
1680 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1681 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1682 hci_cp
.iac_lap
[1] = 0x8b;
1683 hci_cp
.iac_lap
[2] = 0x9e;
1684 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1685 hci_cp
.iac_lap
[4] = 0x8b;
1686 hci_cp
.iac_lap
[5] = 0x9e;
1688 /* General discoverable mode */
1690 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1691 hci_cp
.iac_lap
[1] = 0x8b;
1692 hci_cp
.iac_lap
[2] = 0x9e;
1695 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1696 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1698 scan
|= SCAN_INQUIRY
;
1700 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1703 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1706 update_adv_data(&req
);
1708 err
= hci_req_run(&req
, set_discoverable_complete
);
1710 mgmt_pending_remove(cmd
);
1713 hci_dev_unlock(hdev
);
1717 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1719 struct hci_dev
*hdev
= req
->hdev
;
1720 struct hci_cp_write_page_scan_activity acp
;
1723 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1726 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1730 type
= PAGE_SCAN_TYPE_INTERLACED
;
1732 /* 160 msec page scan interval */
1733 acp
.interval
= cpu_to_le16(0x0100);
1735 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1737 /* default 1.28 sec page scan */
1738 acp
.interval
= cpu_to_le16(0x0800);
1741 acp
.window
= cpu_to_le16(0x0012);
1743 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1744 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1745 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1748 if (hdev
->page_scan_type
!= type
)
1749 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1752 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1754 struct pending_cmd
*cmd
;
1755 struct mgmt_mode
*cp
;
1756 bool conn_changed
, discov_changed
;
1758 BT_DBG("status 0x%02x", status
);
1762 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1767 u8 mgmt_err
= mgmt_status(status
);
1768 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1774 conn_changed
= !test_and_set_bit(HCI_CONNECTABLE
,
1776 discov_changed
= false;
1778 conn_changed
= test_and_clear_bit(HCI_CONNECTABLE
,
1780 discov_changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1784 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1786 if (conn_changed
|| discov_changed
) {
1787 new_settings(hdev
, cmd
->sk
);
1789 mgmt_update_adv_data(hdev
);
1790 hci_update_background_scan(hdev
);
1794 mgmt_pending_remove(cmd
);
1797 hci_dev_unlock(hdev
);
1800 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1801 struct sock
*sk
, u8 val
)
1803 bool changed
= false;
1806 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1810 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1812 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1813 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1816 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1821 hci_update_background_scan(hdev
);
1822 return new_settings(hdev
, sk
);
1828 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1831 struct mgmt_mode
*cp
= data
;
1832 struct pending_cmd
*cmd
;
1833 struct hci_request req
;
1837 BT_DBG("request for %s", hdev
->name
);
1839 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1840 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1841 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1842 MGMT_STATUS_REJECTED
);
1844 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1845 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1846 MGMT_STATUS_INVALID_PARAMS
);
1850 if (!hdev_is_powered(hdev
)) {
1851 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1855 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1856 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1857 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1862 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1868 hci_req_init(&req
, hdev
);
1870 /* If BR/EDR is not enabled and we disable advertising as a
1871 * by-product of disabling connectable, we need to update the
1872 * advertising flags.
1874 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1876 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1877 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1879 update_adv_data(&req
);
1880 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1884 /* If we don't have any whitelist entries just
1885 * disable all scanning. If there are entries
1886 * and we had both page and inquiry scanning
1887 * enabled then fall back to only page scanning.
1888 * Otherwise no changes are needed.
1890 if (list_empty(&hdev
->whitelist
))
1891 scan
= SCAN_DISABLED
;
1892 else if (test_bit(HCI_ISCAN
, &hdev
->flags
))
1895 goto no_scan_update
;
1897 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1898 hdev
->discov_timeout
> 0)
1899 cancel_delayed_work(&hdev
->discov_off
);
1902 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1906 /* If we're going from non-connectable to connectable or
1907 * vice-versa when fast connectable is enabled ensure that fast
1908 * connectable gets disabled. write_fast_connectable won't do
1909 * anything if the page scan parameters are already what they
1912 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1913 write_fast_connectable(&req
, false);
1915 /* Update the advertising parameters if necessary */
1916 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1917 enable_advertising(&req
);
1919 err
= hci_req_run(&req
, set_connectable_complete
);
1921 mgmt_pending_remove(cmd
);
1922 if (err
== -ENODATA
)
1923 err
= set_connectable_update_settings(hdev
, sk
,
1929 hci_dev_unlock(hdev
);
1933 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1936 struct mgmt_mode
*cp
= data
;
1940 BT_DBG("request for %s", hdev
->name
);
1942 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1943 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
1944 MGMT_STATUS_INVALID_PARAMS
);
1949 changed
= !test_and_set_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
1951 changed
= test_and_clear_bit(HCI_BONDABLE
, &hdev
->dev_flags
);
1953 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
1958 err
= new_settings(hdev
, sk
);
1961 hci_dev_unlock(hdev
);
1965 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1968 struct mgmt_mode
*cp
= data
;
1969 struct pending_cmd
*cmd
;
1973 BT_DBG("request for %s", hdev
->name
);
1975 status
= mgmt_bredr_support(hdev
);
1977 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1980 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1981 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1982 MGMT_STATUS_INVALID_PARAMS
);
1986 if (!hdev_is_powered(hdev
)) {
1987 bool changed
= false;
1989 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
1990 &hdev
->dev_flags
)) {
1991 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
1995 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2000 err
= new_settings(hdev
, sk
);
2005 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
2006 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2013 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
2014 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2018 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
2024 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
2026 mgmt_pending_remove(cmd
);
2031 hci_dev_unlock(hdev
);
2035 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2037 struct mgmt_mode
*cp
= data
;
2038 struct pending_cmd
*cmd
;
2042 BT_DBG("request for %s", hdev
->name
);
2044 status
= mgmt_bredr_support(hdev
);
2046 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
2048 if (!lmp_ssp_capable(hdev
))
2049 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2050 MGMT_STATUS_NOT_SUPPORTED
);
2052 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2053 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2054 MGMT_STATUS_INVALID_PARAMS
);
2058 if (!hdev_is_powered(hdev
)) {
2062 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
2065 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
2068 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
2071 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2074 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2079 err
= new_settings(hdev
, sk
);
2084 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
2085 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
2086 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2091 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
2092 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2096 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
2102 if (!cp
->val
&& test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
2103 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
2104 sizeof(cp
->val
), &cp
->val
);
2106 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
2108 mgmt_pending_remove(cmd
);
2113 hci_dev_unlock(hdev
);
2117 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2119 struct mgmt_mode
*cp
= data
;
2124 BT_DBG("request for %s", hdev
->name
);
2126 status
= mgmt_bredr_support(hdev
);
2128 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
2130 if (!lmp_ssp_capable(hdev
))
2131 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2132 MGMT_STATUS_NOT_SUPPORTED
);
2134 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
2135 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2136 MGMT_STATUS_REJECTED
);
2138 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2139 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2140 MGMT_STATUS_INVALID_PARAMS
);
2145 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2147 if (hdev_is_powered(hdev
)) {
2148 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2149 MGMT_STATUS_REJECTED
);
2153 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2156 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
2161 err
= new_settings(hdev
, sk
);
2164 hci_dev_unlock(hdev
);
2168 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
2170 struct cmd_lookup match
= { NULL
, hdev
};
2173 u8 mgmt_err
= mgmt_status(status
);
2175 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2180 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2182 new_settings(hdev
, match
.sk
);
2187 /* Make sure the controller has a good default for
2188 * advertising data. Restrict the update to when LE
2189 * has actually been enabled. During power on, the
2190 * update in powered_update_hci will take care of it.
2192 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2193 struct hci_request req
;
2197 hci_req_init(&req
, hdev
);
2198 update_adv_data(&req
);
2199 update_scan_rsp_data(&req
);
2200 hci_req_run(&req
, NULL
);
2202 hci_update_background_scan(hdev
);
2204 hci_dev_unlock(hdev
);
2208 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2210 struct mgmt_mode
*cp
= data
;
2211 struct hci_cp_write_le_host_supported hci_cp
;
2212 struct pending_cmd
*cmd
;
2213 struct hci_request req
;
2217 BT_DBG("request for %s", hdev
->name
);
2219 if (!lmp_le_capable(hdev
))
2220 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2221 MGMT_STATUS_NOT_SUPPORTED
);
2223 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2224 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2225 MGMT_STATUS_INVALID_PARAMS
);
2227 /* LE-only devices do not allow toggling LE on/off */
2228 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
2229 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2230 MGMT_STATUS_REJECTED
);
2235 enabled
= lmp_host_le_capable(hdev
);
2237 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2238 bool changed
= false;
2240 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2241 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
2245 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
2246 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
2250 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2255 err
= new_settings(hdev
, sk
);
2260 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
2261 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2262 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2267 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2273 hci_req_init(&req
, hdev
);
2275 memset(&hci_cp
, 0, sizeof(hci_cp
));
2279 hci_cp
.simul
= 0x00;
2281 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
2282 disable_advertising(&req
);
2285 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2288 err
= hci_req_run(&req
, le_enable_complete
);
2290 mgmt_pending_remove(cmd
);
2293 hci_dev_unlock(hdev
);
2297 /* This is a helper function to test for pending mgmt commands that can
2298 * cause CoD or EIR HCI commands. We can only allow one such pending
2299 * mgmt command at a time since otherwise we cannot easily track what
2300 * the current values are, will be, and based on that calculate if a new
2301 * HCI command needs to be sent and if yes with what value.
2303 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2305 struct pending_cmd
*cmd
;
2307 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2308 switch (cmd
->opcode
) {
2309 case MGMT_OP_ADD_UUID
:
2310 case MGMT_OP_REMOVE_UUID
:
2311 case MGMT_OP_SET_DEV_CLASS
:
2312 case MGMT_OP_SET_POWERED
:
2320 static const u8 bluetooth_base_uuid
[] = {
2321 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2322 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2325 static u8
get_uuid_size(const u8
*uuid
)
2329 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2332 val
= get_unaligned_le32(&uuid
[12]);
2339 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2341 struct pending_cmd
*cmd
;
2345 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
2349 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2350 hdev
->dev_class
, 3);
2352 mgmt_pending_remove(cmd
);
2355 hci_dev_unlock(hdev
);
2358 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2360 BT_DBG("status 0x%02x", status
);
2362 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2365 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2367 struct mgmt_cp_add_uuid
*cp
= data
;
2368 struct pending_cmd
*cmd
;
2369 struct hci_request req
;
2370 struct bt_uuid
*uuid
;
2373 BT_DBG("request for %s", hdev
->name
);
2377 if (pending_eir_or_class(hdev
)) {
2378 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2383 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2389 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2390 uuid
->svc_hint
= cp
->svc_hint
;
2391 uuid
->size
= get_uuid_size(cp
->uuid
);
2393 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2395 hci_req_init(&req
, hdev
);
2400 err
= hci_req_run(&req
, add_uuid_complete
);
2402 if (err
!= -ENODATA
)
2405 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2406 hdev
->dev_class
, 3);
2410 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2419 hci_dev_unlock(hdev
);
2423 static bool enable_service_cache(struct hci_dev
*hdev
)
2425 if (!hdev_is_powered(hdev
))
2428 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2429 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2437 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2439 BT_DBG("status 0x%02x", status
);
2441 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2444 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2447 struct mgmt_cp_remove_uuid
*cp
= data
;
2448 struct pending_cmd
*cmd
;
2449 struct bt_uuid
*match
, *tmp
;
2450 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2451 struct hci_request req
;
2454 BT_DBG("request for %s", hdev
->name
);
2458 if (pending_eir_or_class(hdev
)) {
2459 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2464 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2465 hci_uuids_clear(hdev
);
2467 if (enable_service_cache(hdev
)) {
2468 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2469 0, hdev
->dev_class
, 3);
2478 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2479 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2482 list_del(&match
->list
);
2488 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2489 MGMT_STATUS_INVALID_PARAMS
);
2494 hci_req_init(&req
, hdev
);
2499 err
= hci_req_run(&req
, remove_uuid_complete
);
2501 if (err
!= -ENODATA
)
2504 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2505 hdev
->dev_class
, 3);
2509 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2518 hci_dev_unlock(hdev
);
2522 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2524 BT_DBG("status 0x%02x", status
);
2526 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2529 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2532 struct mgmt_cp_set_dev_class
*cp
= data
;
2533 struct pending_cmd
*cmd
;
2534 struct hci_request req
;
2537 BT_DBG("request for %s", hdev
->name
);
2539 if (!lmp_bredr_capable(hdev
))
2540 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2541 MGMT_STATUS_NOT_SUPPORTED
);
2545 if (pending_eir_or_class(hdev
)) {
2546 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2551 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2552 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2553 MGMT_STATUS_INVALID_PARAMS
);
2557 hdev
->major_class
= cp
->major
;
2558 hdev
->minor_class
= cp
->minor
;
2560 if (!hdev_is_powered(hdev
)) {
2561 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2562 hdev
->dev_class
, 3);
2566 hci_req_init(&req
, hdev
);
2568 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2569 hci_dev_unlock(hdev
);
2570 cancel_delayed_work_sync(&hdev
->service_cache
);
2577 err
= hci_req_run(&req
, set_class_complete
);
2579 if (err
!= -ENODATA
)
2582 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2583 hdev
->dev_class
, 3);
2587 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2596 hci_dev_unlock(hdev
);
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
		       sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_link_keys_clear(hdev);

	changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags);

	changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags);

	new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			/* ... */;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);
}
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
			   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
			   &rp, sizeof(rp));
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;
		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}
	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
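/* link_to_bdaddr() above folds an HCI (link_type, addr_type) pair back into
 * the single address-type byte used on the mgmt interface: LE links map to
 * BDADDR_LE_PUBLIC or BDADDR_LE_RANDOM, everything else to BDADDR_BREDR.
 */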
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_rp_get_connections *rp;

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
	}

	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			/* ... */;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);

	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			/* ... */;

		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			/* ... */;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	hci_dev_unlock(hdev);
}
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);

	mgmt_pending_remove(cmd);
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);

	err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
			 MGMT_STATUS_NOT_CONNECTED);

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);

		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}

static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			/* ... */;

		if (cmd->user_data != conn)
			/* ... */;
	}
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct pending_cmd *cmd;

	cmd = find_pairing(conn);

	pairing_complete(cmd, status);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);

	BT_DBG("Unable to find a pending command");

	pairing_complete(cmd, mgmt_status(status));
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);

	BT_DBG("Unable to find a pending command");

	pairing_complete(cmd, mgmt_status(status));
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      /* ... */);
	}

	if (PTR_ERR(conn) == -EBUSY)
		status = MGMT_STATUS_BUSY;
	else
		status = MGMT_STATUS_CONNECT_FAILED;

	err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
			   status, &rp, sizeof(rp));

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);

	hci_conn_drop(conn);

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	hci_dev_unlock(hdev);
}
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);

	err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
			 MGMT_STATUS_INVALID_PARAMS);

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));

	hci_dev_unlock(hdev);
}
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	err = cmd_complete(sk, hdev->id, mgmt_op,
			   MGMT_STATUS_NOT_CONNECTED, addr,
			   sizeof(*addr));

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_SUCCESS, addr,
				   sizeof(*addr));

		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_FAILED, addr,
				   sizeof(*addr));
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else {
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);
	}

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
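/* The PIN/confirmation/passkey reply handlers below are thin wrappers: each
 * one simply forwards its address to user_pairing_resp() together with the
 * matching mgmt opcode and HCI opcode (and, for passkey replies, the passkey
 * value itself).
 */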
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
		   mgmt_status(status));

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
		     /* ... */);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   /* ... */);
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   /* ... */);

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		/* ... */;
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 /* ... */);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	BT_DBG("%s ", hdev->name);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);

		status = MGMT_STATUS_FAILED;

		status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  /* ... */);

		status = MGMT_STATUS_FAILED;

		status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;

	BT_DBG("%s", hdev->name);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);

	status = MGMT_STATUS_INVALID_PARAMS;

	status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
}
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);
}

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	mgmt_start_discovery_failed(hdev, status);
	hci_dev_unlock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);

	case DISCOV_TYPE_BREDR:

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
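/* Start Discovery supports three modes: BR/EDR inquiry, LE-only scanning and
 * interleaved discovery. For the LE cases the request built below disables
 * advertising and any running background scan first, then programs the
 * active scan parameters and finally enables scanning with duplicate
 * filtering turned on.
 */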
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 /* ... */);
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 /* ... */);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);

		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 /* ... */);
		mgmt_pending_remove(cmd);

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 /* ... */);
			mgmt_pending_remove(cmd);
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);

		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 /* ... */);
		mgmt_pending_remove(cmd);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       /* ... */)) {
				err = cmd_status(sk, hdev->id,
						 MGMT_OP_START_DISCOVERY,
						 MGMT_STATUS_REJECTED);
				mgmt_pending_remove(cmd);
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);

		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
	}

	err = hci_req_run(&req, start_discovery_complete);

	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	hci_dev_unlock(hdev);
}
static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);
}

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	mgmt_stop_discovery_failed(hdev, status);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

	hci_dev_unlock(hdev);
}
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;

	BT_DBG("%s", hdev->name);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
			   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
			   sizeof(cp->addr));

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

	hci_dev_unlock(hdev);
}
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);

	status = MGMT_STATUS_FAILED;

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);

	status = MGMT_STATUS_INVALID_PARAMS;

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
}
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);

	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
}
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	u8 mgmt_err = mgmt_status(status);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
			     cmd_status_rsp, &mgmt_err);

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);

	return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
			  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * needed).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);

		err = new_settings(hdev, sk);
	}

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 /* ... */);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);

	hci_req_init(&req, hdev);

	enable_advertising(&req);

	disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);
}
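/* A static random address must have its two most significant bits set, which
 * is what the (bdaddr.b[5] & 0xc0) == 0xc0 check above enforces; passing
 * BDADDR_ANY skips the validation and in effect clears the configured
 * address.
 */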
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
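/* Scan interval and window are given in units of 0.625 ms, so the allowed
 * 0x0004-0x4000 range above corresponds to 2.5 ms - 10.24 s, and the window
 * may never exceed the interval.
 */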
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
		   mgmt_status(status));

	struct mgmt_mode *cp = cmd->param;

	set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

	clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 /* ... */);
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);

	err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			 MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    !list_empty(&hdev->whitelist))
		/* ... */;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);

	u8 mgmt_err = mgmt_status(status);

	/* We need to restore the flag if related HCI commands
	 * failed.
	 */
	clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
	new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
	}

	if (!hdev_is_powered(hdev)) {
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
		clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

		err = new_settings(hdev, sk);
	}

	/* Reject disabling when powered on */
	err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
			 MGMT_STATUS_REJECTED);

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 /* ... */);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    !list_empty(&hdev->whitelist))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
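/* For Set Secure Connections the mode byte may be 0x00 (off), 0x01 (on) or
 * 0x02 (secure connections only); the 0x02 case is what toggles the
 * HCI_SC_ONLY flag in the handler below.
 */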
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);

	return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
			  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev)) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);

		if (cp->val == 0x02)
			set_bit(HCI_SC_ONLY, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

		err = new_settings(hdev, sk);
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 /* ... */);
	}

	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);

	mgmt_pending_remove(cmd);

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

	hci_dev_unlock(hdev);
}
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags);

	changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags);

	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);

	err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
}
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
	memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
	memset(hdev->irk, 0, sizeof(hdev->irk));
	clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);

	err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
}
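/* IRK and LTK entries are validated the same way before being loaded: an LE
 * public address is always acceptable, while an LE random address must be a
 * static address, i.e. have its two most significant bits set.
 */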
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		/* ... */;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			/* ... */;
	}
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    /* ... */);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);
}
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->master != 0x00 && key->master != 0x01)
		/* ... */;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		/* ... */;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			/* ... */;
	}
}
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
		       sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;

		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    /* ... */);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);
}
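/* Get Connection Information reports RSSI and TX power from values cached in
 * hci_conn. The code below refreshes that cache with HCI Read RSSI and Read
 * Transmit Power Level requests when it is older than a randomized age
 * between hdev->conn_info_min_age and hdev->conn_info_max_age, which keeps
 * userspace from being able to time its polling precisely.
 */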
struct cmd_conn_lookup {
	struct hci_conn *conn;
	bool valid_tx_power;
	u8 mgmt_status;
};

static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	if (conn != match->conn)
		/* ... */;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;

	BT_DBG("status 0x%02x", status);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);

	BT_ERR("invalid sent_cmd in response");

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);

	BT_ERR("unknown handle (%d) in response", handle);

	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

	hci_dev_unlock(hdev);
}
*sk
, struct hci_dev
*hdev
, void *data
,
5003 struct mgmt_cp_get_conn_info
*cp
= data
;
5004 struct mgmt_rp_get_conn_info rp
;
5005 struct hci_conn
*conn
;
5006 unsigned long conn_info_age
;
5009 BT_DBG("%s", hdev
->name
);
5011 memset(&rp
, 0, sizeof(rp
));
5012 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5013 rp
.addr
.type
= cp
->addr
.type
;
5015 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5016 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5017 MGMT_STATUS_INVALID_PARAMS
,
5022 if (!hdev_is_powered(hdev
)) {
5023 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5024 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
5028 if (cp
->addr
.type
== BDADDR_BREDR
)
5029 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5032 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
5034 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5035 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5036 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
5040 /* To avoid client trying to guess when to poll again for information we
5041 * calculate conn info age as random value between min/max set in hdev.
5043 conn_info_age
= hdev
->conn_info_min_age
+
5044 prandom_u32_max(hdev
->conn_info_max_age
-
5045 hdev
->conn_info_min_age
);
5047 /* Query controller to refresh cached values if they are too old or were
5050 if (time_after(jiffies
, conn
->conn_info_timestamp
+
5051 msecs_to_jiffies(conn_info_age
)) ||
5052 !conn
->conn_info_timestamp
) {
5053 struct hci_request req
;
5054 struct hci_cp_read_tx_power req_txp_cp
;
5055 struct hci_cp_read_rssi req_rssi_cp
;
5056 struct pending_cmd
*cmd
;
5058 hci_req_init(&req
, hdev
);
5059 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
5060 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
5063 /* For LE links TX power does not change thus we don't need to
5064 * query for it once value is known.
5066 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5067 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
5068 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5069 req_txp_cp
.type
= 0x00;
5070 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5071 sizeof(req_txp_cp
), &req_txp_cp
);
5074 /* Max TX power needs to be read only once per connection */
5075 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5076 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5077 req_txp_cp
.type
= 0x01;
5078 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5079 sizeof(req_txp_cp
), &req_txp_cp
);
5082 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5086 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5093 hci_conn_hold(conn
);
5094 cmd
->user_data
= conn
;
5096 conn
->conn_info_timestamp
= jiffies
;
5098 /* Cache is valid, just reply with values cached in hci_conn */
5099 rp
.rssi
= conn
->rssi
;
5100 rp
.tx_power
= conn
->tx_power
;
5101 rp
.max_tx_power
= conn
->max_tx_power
;
5103 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5104 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5108 hci_dev_unlock(hdev
);
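/* Get Clock Information uses HCI Read Clock: which = 0x00 reads the local
 * controller clock and which = 0x01 reads the piconet clock of the given
 * connection, so the request below issues one Read Clock for the local value
 * and, if a connection was specified, a second one for the piconet clock.
 */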
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
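
/* Get Clock Information is BR/EDR only: it always reads the local clock
 * and, when a peer address is supplied and connected, additionally reads
 * that connection's piconet clock via a second HCI_OP_READ_CLOCK.
 */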
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		hci_conn_hold(conn);
		cmd->user_data = conn;

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* Helper for Add/Remove Device commands */
static void update_page_scan(struct hci_dev *hdev, u8 scan)
{
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	/* If HCI_CONNECTABLE is set then Add/Remove Device should not
	 * make any changes to page scanning.
	 */
	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		bool update_scan;

		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		update_scan = list_empty(&hdev->whitelist);

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		if (update_scan)
			update_page_scan(hdev, SCAN_PAGE);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
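
/* Remove Device undoes Add Device: a specific BR/EDR address is dropped
 * from the whitelist, a specific LE address has its connection parameters
 * deleted, and BDADDR_ANY with type 0x00 clears the whitelist and all
 * enabled LE connection parameters.
 */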
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			if (list_empty(&hdev->whitelist))
				update_page_scan(hdev, SCAN_DISABLED);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		update_page_scan(hdev, SCAN_DISABLED);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
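
/* Load Connection Parameters replaces the stored LE connection parameters
 * wholesale: previously loaded (disabled) entries are cleared first and
 * each valid entry from the command is added back; invalid entries are
 * skipped with an error message.
 */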
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
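
/* Set External Configuration is only valid while the controller is powered
 * off and only for controllers flagging HCI_QUIRK_EXTERNAL_CONFIG; toggling
 * it may move the controller between the configured and unconfigured index
 * lists.
 */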
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
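
/* Command dispatch table, indexed by mgmt opcode. Entries marked var_len
 * accept payloads larger than the listed minimum size (the variable-length
 * load commands); all others require an exact size match in mgmt_control().
 */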
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_bondable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	{ set_privacy, false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
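
/* Entry point for raw management commands received on the HCI control
 * channel: validate the header, resolve the controller index, filter
 * commands that are not allowed in the current controller state and
 * dispatch to the handler table above.
 */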
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	kfree(buf);

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
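
/* Index Added/Removed events are only emitted for BR/EDR capable,
 * non-raw controllers; unconfigured controllers use the corresponding
 * Unconfigured Index variants instead.
 */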
void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
	else
		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
	else
		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	hci_update_background_scan(hdev);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
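
/* Bring the controller's HCI state in line with the current mgmt settings
 * after power on (SSP, LE host support, advertising data, link security,
 * scan mode), then report the new settings from powered_complete().
 */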
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
	}

	return hci_req_run(&req, powered_complete);
}
*hdev
, u8 powered
)
5984 struct cmd_lookup match
= { NULL
, hdev
};
5985 u8 status_not_powered
= MGMT_STATUS_NOT_POWERED
;
5986 u8 zero_cod
[] = { 0, 0, 0 };
5989 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
5993 if (powered_update_hci(hdev
) == 0)
5996 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
6001 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
6002 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status_not_powered
);
6004 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
6005 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
6006 zero_cod
, sizeof(zero_cod
), NULL
);
6009 err
= new_settings(hdev
, match
.sk
);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}

	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	if (ltk->authenticated)
		return MGMT_LTK_AUTHENTICATED;

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
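
/* Append a single EIR field (length octet, type octet, data) to the given
 * buffer and return the new total EIR length.
 */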
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
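
/* A disconnection may race with a pending Set Powered (off) command; if
 * this was the last connection, the delayed power-off work is scheduled
 * to run immediately before the Device Disconnected event is sent.
 */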
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u16 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
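
/* A resolved remote name is reported as a Device Found event that carries
 * only an EIR Complete Local Name field.
 */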
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}