/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#define MGMT_VERSION	1
#define MGMT_REVISION	8
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	size_t param_len;
	struct sock *sk;
	void *user_data;
	void (*cmd_complete)(struct pending_cmd *cmd, u8 status);
};
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}
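
/* Build an skb carrying a mgmt_hdr plus the event payload and broadcast
 * it to all open HCI control channel sockets, optionally skipping the
 * socket that triggered the event.
 */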
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	memcpy(skb_put(skb, data_len), data, data_len);

	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
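
/* Like cmd_status(), but queues an MGMT_EV_CMD_COMPLETE event that also
 * carries command-specific return parameters (rp/rp_len) back to the
 * socket that issued the command.
 */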
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
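
/* Walk hci_dev_list twice under hci_dev_list_lock: first to size the
 * reply, then to fill in the index of every configured BR/EDR
 * controller that is visible to the management interface.
 */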
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		return false;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}
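
/* Report which configuration options (external config, public address)
 * are still missing before an unconfigured controller can be fully
 * configured and powered up.
 */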
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return cmd_complete(sk, hdev->id, opcode, 0, &options,
			    sizeof(options));
}
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
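
/* Compute the MGMT_SETTING_* bitmask advertised in Read Controller Info,
 * based on the controller's BR/EDR, SSP, Secure Connections and LE
 * capabilities.
 */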
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
#define PNP_INFO_SVCLASS_ID		0x1200
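
/* The three helpers below append EIR UUID lists (16-, 32- and 128-bit)
 * to the buffer at @data, downgrading the field type to the "incomplete
 * list" variant when the available space runs out.
 */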
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->user_data != data)
			continue;
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
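
/* Build LE scan response data containing the (possibly shortened) local
 * name and return the number of bytes written.
 */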
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	BT_DBG("adv flags 0x%02x", flags);

	if (flags) {
		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static bool get_connectable(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
}
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
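
/* Program the advertising parameters (interval, type, own address type,
 * channel map) and enable advertising. Connectable advertising is used
 * whenever the connectable setting, or a pending Set Connectable
 * command, says so.
 */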
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
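
/* Allocate a pending_cmd entry, duplicate the command parameters and
 * link it into hdev->mgmt_pending so the reply can be sent once the
 * matching HCI request completes.
 */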
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmemdup(data, len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	cmd->param_len = len;

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
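
/* Queue the HCI commands needed before powering down: disable page and
 * inquiry scan, stop advertising and discovery, and disconnect or
 * reject every known connection.
 */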
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);
		return;
	}

	cmd_status_rsp(cmd, data);
}
static void generic_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     cmd->param_len);
}
static void addr_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
		     sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;

	return MGMT_STATUS_SUCCESS;
}
static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;

	return MGMT_STATUS_SUCCESS;
}
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
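
/* Set Discoverable command handler. Validates the mode/timeout
 * combination, handles the powered-off and "only the timeout changed"
 * shortcuts, and otherwise queues the IAC, scan enable and advertising
 * data updates for set_discoverable_complete() to finish.
 */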
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
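
/* Adjust page scan interval and type for fast connectable mode; no HCI
 * commands are added if the controller already uses the requested
 * parameters.
 */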
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev, NULL);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_page_scan(hdev, NULL);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
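
/* Determine whether a 128-bit UUID is really a 16- or 32-bit UUID
 * expressed on top of the Bluetooth base UUID.
 */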
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2630 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2633 struct mgmt_cp_load_link_keys
*cp
= data
;
2634 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2635 sizeof(struct mgmt_link_key_info
));
2636 u16 key_count
, expected_len
;
2640 BT_DBG("request for %s", hdev
->name
);
2642 if (!lmp_bredr_capable(hdev
))
2643 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2644 MGMT_STATUS_NOT_SUPPORTED
);
2646 key_count
= __le16_to_cpu(cp
->key_count
);
2647 if (key_count
> max_key_count
) {
2648 BT_ERR("load_link_keys: too big key_count value %u",
2650 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2651 MGMT_STATUS_INVALID_PARAMS
);
2654 expected_len
= sizeof(*cp
) + key_count
*
2655 sizeof(struct mgmt_link_key_info
);
2656 if (expected_len
!= len
) {
2657 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2659 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2660 MGMT_STATUS_INVALID_PARAMS
);
2663 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2664 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2665 MGMT_STATUS_INVALID_PARAMS
);
2667 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2670 for (i
= 0; i
< key_count
; i
++) {
2671 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2673 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2674 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2675 MGMT_STATUS_INVALID_PARAMS
);
2680 hci_link_keys_clear(hdev
);
2683 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
2686 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
2690 new_settings(hdev
, NULL
);
2692 for (i
= 0; i
< key_count
; i
++) {
2693 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2695 /* Always ignore debug keys and require a new pairing if
2696 * the user wants to use them.
2698 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2701 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2702 key
->type
, key
->pin_len
, NULL
);
2705 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2707 hci_dev_unlock(hdev
);
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
                           u8 addr_type, struct sock *skip_sk)
{
    struct mgmt_ev_device_unpaired ev;

    bacpy(&ev.addr.bdaddr, bdaddr);
    ev.addr.type = addr_type;

    return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
                      skip_sk);
}
2724 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2727 struct mgmt_cp_unpair_device
*cp
= data
;
2728 struct mgmt_rp_unpair_device rp
;
2729 struct hci_cp_disconnect dc
;
2730 struct pending_cmd
*cmd
;
2731 struct hci_conn
*conn
;
2734 memset(&rp
, 0, sizeof(rp
));
2735 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2736 rp
.addr
.type
= cp
->addr
.type
;
2738 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2739 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2740 MGMT_STATUS_INVALID_PARAMS
,
2743 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2744 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2745 MGMT_STATUS_INVALID_PARAMS
,
2750 if (!hdev_is_powered(hdev
)) {
2751 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2752 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2756 if (cp
->addr
.type
== BDADDR_BREDR
) {
2757 /* If disconnection is requested, then look up the
2758 * connection. If the remote device is connected, it
2759 * will be later used to terminate the link.
2761 * Setting it to NULL explicitly will cause no
2762 * termination of the link.
2765 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2770 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2774 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2777 /* Defer clearing up the connection parameters
2778 * until closing to give a chance of keeping
2779 * them if a repairing happens.
2781 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2783 /* If disconnection is not requested, then
2784 * clear the connection variable so that the
2785 * link is not terminated.
2787 if (!cp
->disconnect
)
2791 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2792 addr_type
= ADDR_LE_DEV_PUBLIC
;
2794 addr_type
= ADDR_LE_DEV_RANDOM
;
2796 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2798 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2802 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2803 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2807 /* If the connection variable is set, then termination of the
2808 * link is requested.
2811 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2813 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2817 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2824 cmd
->cmd_complete
= addr_cmd_complete
;
2826 dc
.handle
= cpu_to_le16(conn
->handle
);
2827 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2828 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2830 mgmt_pending_remove(cmd
);
2833 hci_dev_unlock(hdev
);
2837 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2840 struct mgmt_cp_disconnect
*cp
= data
;
2841 struct mgmt_rp_disconnect rp
;
2842 struct pending_cmd
*cmd
;
2843 struct hci_conn
*conn
;
2848 memset(&rp
, 0, sizeof(rp
));
2849 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2850 rp
.addr
.type
= cp
->addr
.type
;
2852 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2853 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2854 MGMT_STATUS_INVALID_PARAMS
,
2859 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2860 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2861 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2865 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2866 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2867 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2871 if (cp
->addr
.type
== BDADDR_BREDR
)
2872 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2875 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2877 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2878 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2879 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2883 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2889 cmd
->cmd_complete
= generic_cmd_complete
;
2891 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2893 mgmt_pending_remove(cmd
);
2896 hci_dev_unlock(hdev
);
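/* Translate an HCI link type and LE address type into the single
 * address type value used on the Management interface.
 */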
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
    switch (link_type) {
    case LE_LINK:
        switch (addr_type) {
        case ADDR_LE_DEV_PUBLIC:
            return BDADDR_LE_PUBLIC;

        default:
            /* Fallback to LE Random address type */
            return BDADDR_LE_RANDOM;
        }

    default:
        /* Fallback to BR/EDR type */
        return BDADDR_BREDR;
    }
}
2919 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2922 struct mgmt_rp_get_connections
*rp
;
2932 if (!hdev_is_powered(hdev
)) {
2933 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2934 MGMT_STATUS_NOT_POWERED
);
2939 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2940 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2944 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2945 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2952 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2953 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2955 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2956 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2957 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2962 rp
->conn_count
= cpu_to_le16(i
);
2964 /* Recalculate length in case of filtered SCO connections, etc */
2965 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2967 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2973 hci_dev_unlock(hdev
);
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                   struct mgmt_cp_pin_code_neg_reply *cp)
{
    struct pending_cmd *cmd;
    int err;

    cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
                           sizeof(*cp));
    if (!cmd)
        return -ENOMEM;

    err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
                       sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
    if (err < 0)
        mgmt_pending_remove(cmd);

    return err;
}
2996 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2999 struct hci_conn
*conn
;
3000 struct mgmt_cp_pin_code_reply
*cp
= data
;
3001 struct hci_cp_pin_code_reply reply
;
3002 struct pending_cmd
*cmd
;
3009 if (!hdev_is_powered(hdev
)) {
3010 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3011 MGMT_STATUS_NOT_POWERED
);
3015 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
3017 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3018 MGMT_STATUS_NOT_CONNECTED
);
3022 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
3023 struct mgmt_cp_pin_code_neg_reply ncp
;
3025 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
3027 BT_ERR("PIN code is not 16 bytes long");
3029 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
3031 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3032 MGMT_STATUS_INVALID_PARAMS
);
3037 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
3043 cmd
->cmd_complete
= addr_cmd_complete
;
3045 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
3046 reply
.pin_len
= cp
->pin_len
;
3047 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
3049 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
3051 mgmt_pending_remove(cmd
);
3054 hci_dev_unlock(hdev
);
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
                             u16 len)
{
    struct mgmt_cp_set_io_capability *cp = data;

    if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
        return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
                            MGMT_STATUS_INVALID_PARAMS, NULL, 0);

    hci_dev_lock(hdev);

    hdev->io_capability = cp->io_capability;

    BT_DBG("%s IO capability set to 0x%02x", hdev->name,
           hdev->io_capability);

    hci_dev_unlock(hdev);

    return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
                        0);
}
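/* Find the pending Pair Device command whose user data points at the
 * given connection, if any.
 */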
static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
    struct hci_dev *hdev = conn->hdev;
    struct pending_cmd *cmd;

    list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
        if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
            continue;

        if (cmd->user_data != conn)
            continue;

        return cmd;
    }

    return NULL;
}
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
    struct mgmt_rp_pair_device rp;
    struct hci_conn *conn = cmd->user_data;

    bacpy(&rp.addr.bdaddr, &conn->dst);
    rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

    cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
                 &rp, sizeof(rp));

    /* So we don't get further callbacks for this connection */
    conn->connect_cfm_cb = NULL;
    conn->security_cfm_cb = NULL;
    conn->disconn_cfm_cb = NULL;

    hci_conn_drop(conn);

    mgmt_pending_remove(cmd);

    /* The device is paired so there is no need to remove
     * its connection parameters anymore.
     */
    clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
    u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
    struct pending_cmd *cmd;

    cmd = find_pairing(conn);
    if (cmd)
        cmd->cmd_complete(cmd, status);
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
    struct pending_cmd *cmd;

    BT_DBG("status %u", status);

    cmd = find_pairing(conn);
    if (!cmd)
        BT_DBG("Unable to find a pending command");
    else
        cmd->cmd_complete(cmd, mgmt_status(status));
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
    struct pending_cmd *cmd;

    BT_DBG("status %u", status);

    if (!status)
        return;

    cmd = find_pairing(conn);
    if (!cmd)
        BT_DBG("Unable to find a pending command");
    else
        cmd->cmd_complete(cmd, mgmt_status(status));
}
3166 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3169 struct mgmt_cp_pair_device
*cp
= data
;
3170 struct mgmt_rp_pair_device rp
;
3171 struct pending_cmd
*cmd
;
3172 u8 sec_level
, auth_type
;
3173 struct hci_conn
*conn
;
3178 memset(&rp
, 0, sizeof(rp
));
3179 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3180 rp
.addr
.type
= cp
->addr
.type
;
3182 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3183 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3184 MGMT_STATUS_INVALID_PARAMS
,
3187 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3188 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3189 MGMT_STATUS_INVALID_PARAMS
,
3194 if (!hdev_is_powered(hdev
)) {
3195 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3196 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
3200 sec_level
= BT_SECURITY_MEDIUM
;
3201 auth_type
= HCI_AT_DEDICATED_BONDING
;
3203 if (cp
->addr
.type
== BDADDR_BREDR
) {
3204 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3209 /* Convert from L2CAP channel address type to HCI address type
3211 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
3212 addr_type
= ADDR_LE_DEV_PUBLIC
;
3214 addr_type
= ADDR_LE_DEV_RANDOM
;
3216 /* When pairing a new device, it is expected to remember
3217 * this device for future connections. Adding the connection
3218 * parameter information ahead of time allows tracking
3219 * of the slave preferred values and will speed up any
3220 * further connection establishment.
3222 * If connection parameters already exist, then they
3223 * will be kept and this function does nothing.
3225 hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3227 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, addr_type
,
3228 sec_level
, HCI_LE_CONN_TIMEOUT
,
3235 if (PTR_ERR(conn
) == -EBUSY
)
3236 status
= MGMT_STATUS_BUSY
;
3238 status
= MGMT_STATUS_CONNECT_FAILED
;
3240 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3246 if (conn
->connect_cfm_cb
) {
3247 hci_conn_drop(conn
);
3248 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3249 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3253 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3256 hci_conn_drop(conn
);
3260 cmd
->cmd_complete
= pairing_complete
;
3262 /* For LE, just connecting isn't a proof that the pairing finished */
3263 if (cp
->addr
.type
== BDADDR_BREDR
) {
3264 conn
->connect_cfm_cb
= pairing_complete_cb
;
3265 conn
->security_cfm_cb
= pairing_complete_cb
;
3266 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3268 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3269 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3270 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3273 conn
->io_capability
= cp
->io_cap
;
3274 cmd
->user_data
= hci_conn_get(conn
);
3276 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3277 hci_conn_security(conn
, sec_level
, auth_type
, true))
3278 pairing_complete(cmd
, 0);
3283 hci_dev_unlock(hdev
);
3287 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3290 struct mgmt_addr_info
*addr
= data
;
3291 struct pending_cmd
*cmd
;
3292 struct hci_conn
*conn
;
3299 if (!hdev_is_powered(hdev
)) {
3300 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3301 MGMT_STATUS_NOT_POWERED
);
3305 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3307 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3308 MGMT_STATUS_INVALID_PARAMS
);
3312 conn
= cmd
->user_data
;
3314 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3315 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3316 MGMT_STATUS_INVALID_PARAMS
);
3320 pairing_complete(cmd
, MGMT_STATUS_CANCELLED
);
3322 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3323 addr
, sizeof(*addr
));
3325 hci_dev_unlock(hdev
);
3329 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3330 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3331 u16 hci_op
, __le32 passkey
)
3333 struct pending_cmd
*cmd
;
3334 struct hci_conn
*conn
;
3339 if (!hdev_is_powered(hdev
)) {
3340 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3341 MGMT_STATUS_NOT_POWERED
, addr
,
3346 if (addr
->type
== BDADDR_BREDR
)
3347 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3349 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
3352 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3353 MGMT_STATUS_NOT_CONNECTED
, addr
,
3358 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3359 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3361 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3362 MGMT_STATUS_SUCCESS
, addr
,
3365 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3366 MGMT_STATUS_FAILED
, addr
,
3372 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3378 cmd
->cmd_complete
= addr_cmd_complete
;
3380 /* Continue with pairing via HCI */
3381 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3382 struct hci_cp_user_passkey_reply cp
;
3384 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3385 cp
.passkey
= passkey
;
3386 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3388 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3392 mgmt_pending_remove(cmd
);
3395 hci_dev_unlock(hdev
);
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
                              void *data, u16 len)
{
    struct mgmt_cp_pin_code_neg_reply *cp = data;

    return user_pairing_resp(sk, hdev, &cp->addr,
                             MGMT_OP_PIN_CODE_NEG_REPLY,
                             HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
{
    struct mgmt_cp_user_confirm_reply *cp = data;

    if (len != sizeof(*cp))
        return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
                          MGMT_STATUS_INVALID_PARAMS);

    return user_pairing_resp(sk, hdev, &cp->addr,
                             MGMT_OP_USER_CONFIRM_REPLY,
                             HCI_OP_USER_CONFIRM_REPLY, 0);
}
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                  void *data, u16 len)
{
    struct mgmt_cp_user_confirm_neg_reply *cp = data;

    return user_pairing_resp(sk, hdev, &cp->addr,
                             MGMT_OP_USER_CONFIRM_NEG_REPLY,
                             HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
{
    struct mgmt_cp_user_passkey_reply *cp = data;

    return user_pairing_resp(sk, hdev, &cp->addr,
                             MGMT_OP_USER_PASSKEY_REPLY,
                             HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
                                  void *data, u16 len)
{
    struct mgmt_cp_user_passkey_neg_reply *cp = data;

    return user_pairing_resp(sk, hdev, &cp->addr,
                             MGMT_OP_USER_PASSKEY_NEG_REPLY,
                             HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
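/* Queue an HCI Write Local Name command carrying the current device name. */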
static void update_name(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;
    struct hci_cp_write_local_name cp;

    memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

    hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3473 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3475 struct mgmt_cp_set_local_name
*cp
;
3476 struct pending_cmd
*cmd
;
3478 BT_DBG("status 0x%02x", status
);
3482 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3489 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3490 mgmt_status(status
));
3492 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3495 mgmt_pending_remove(cmd
);
3498 hci_dev_unlock(hdev
);
3501 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3504 struct mgmt_cp_set_local_name
*cp
= data
;
3505 struct pending_cmd
*cmd
;
3506 struct hci_request req
;
3513 /* If the old values are the same as the new ones just return a
3514 * direct command complete event.
3516 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3517 !memcmp(hdev
->short_name
, cp
->short_name
,
3518 sizeof(hdev
->short_name
))) {
3519 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3524 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3526 if (!hdev_is_powered(hdev
)) {
3527 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3529 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3534 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3540 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3546 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3548 hci_req_init(&req
, hdev
);
3550 if (lmp_bredr_capable(hdev
)) {
3555 /* The name is stored in the scan response data and so
3556 * no need to udpate the advertising data here.
3558 if (lmp_le_capable(hdev
))
3559 update_scan_rsp_data(&req
);
3561 err
= hci_req_run(&req
, set_name_complete
);
3563 mgmt_pending_remove(cmd
);
3566 hci_dev_unlock(hdev
);
3570 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3571 void *data
, u16 data_len
)
3573 struct pending_cmd
*cmd
;
3576 BT_DBG("%s", hdev
->name
);
3580 if (!hdev_is_powered(hdev
)) {
3581 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3582 MGMT_STATUS_NOT_POWERED
);
3586 if (!lmp_ssp_capable(hdev
)) {
3587 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3588 MGMT_STATUS_NOT_SUPPORTED
);
3592 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3593 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3598 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3604 if (bredr_sc_enabled(hdev
))
3605 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3608 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3611 mgmt_pending_remove(cmd
);
3614 hci_dev_unlock(hdev
);
3618 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3619 void *data
, u16 len
)
3623 BT_DBG("%s ", hdev
->name
);
3627 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3628 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3631 if (cp
->addr
.type
!= BDADDR_BREDR
) {
3632 err
= cmd_complete(sk
, hdev
->id
,
3633 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3634 MGMT_STATUS_INVALID_PARAMS
,
3635 &cp
->addr
, sizeof(cp
->addr
));
3639 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3640 cp
->addr
.type
, cp
->hash
,
3641 cp
->rand
, NULL
, NULL
);
3643 status
= MGMT_STATUS_FAILED
;
3645 status
= MGMT_STATUS_SUCCESS
;
3647 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3648 status
, &cp
->addr
, sizeof(cp
->addr
));
3649 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3650 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3651 u8
*rand192
, *hash192
;
3654 if (cp
->addr
.type
!= BDADDR_BREDR
) {
3655 err
= cmd_complete(sk
, hdev
->id
,
3656 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3657 MGMT_STATUS_INVALID_PARAMS
,
3658 &cp
->addr
, sizeof(cp
->addr
));
3662 if (bdaddr_type_is_le(cp
->addr
.type
)) {
3666 rand192
= cp
->rand192
;
3667 hash192
= cp
->hash192
;
3670 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3671 cp
->addr
.type
, hash192
, rand192
,
3672 cp
->hash256
, cp
->rand256
);
3674 status
= MGMT_STATUS_FAILED
;
3676 status
= MGMT_STATUS_SUCCESS
;
3678 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3679 status
, &cp
->addr
, sizeof(cp
->addr
));
3681 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3682 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3683 MGMT_STATUS_INVALID_PARAMS
);
3687 hci_dev_unlock(hdev
);
3691 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3692 void *data
, u16 len
)
3694 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3698 BT_DBG("%s", hdev
->name
);
3700 if (cp
->addr
.type
!= BDADDR_BREDR
)
3701 return cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3702 MGMT_STATUS_INVALID_PARAMS
,
3703 &cp
->addr
, sizeof(cp
->addr
));
3707 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
3708 hci_remote_oob_data_clear(hdev
);
3709 status
= MGMT_STATUS_SUCCESS
;
3713 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3715 status
= MGMT_STATUS_INVALID_PARAMS
;
3717 status
= MGMT_STATUS_SUCCESS
;
3720 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3721 status
, &cp
->addr
, sizeof(cp
->addr
));
3723 hci_dev_unlock(hdev
);
3727 static bool trigger_discovery(struct hci_request
*req
, u8
*status
)
3729 struct hci_dev
*hdev
= req
->hdev
;
3730 struct hci_cp_le_set_scan_param param_cp
;
3731 struct hci_cp_le_set_scan_enable enable_cp
;
3732 struct hci_cp_inquiry inq_cp
;
3733 /* General inquiry access code (GIAC) */
3734 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3738 switch (hdev
->discovery
.type
) {
3739 case DISCOV_TYPE_BREDR
:
3740 *status
= mgmt_bredr_support(hdev
);
3744 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3745 *status
= MGMT_STATUS_BUSY
;
3749 hci_inquiry_cache_flush(hdev
);
3751 memset(&inq_cp
, 0, sizeof(inq_cp
));
3752 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3753 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3754 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3757 case DISCOV_TYPE_LE
:
3758 case DISCOV_TYPE_INTERLEAVED
:
3759 *status
= mgmt_le_support(hdev
);
3763 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3764 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3765 *status
= MGMT_STATUS_NOT_SUPPORTED
;
3769 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
)) {
3770 /* Don't let discovery abort an outgoing
3771 * connection attempt that's using directed
3774 if (hci_conn_hash_lookup_state(hdev
, LE_LINK
,
3776 *status
= MGMT_STATUS_REJECTED
;
3780 disable_advertising(req
);
3783 /* If controller is scanning, it means the background scanning
3784 * is running. Thus, we should temporarily stop it in order to
3785 * set the discovery scanning parameters.
3787 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
3788 hci_req_add_le_scan_disable(req
);
3790 memset(¶m_cp
, 0, sizeof(param_cp
));
3792 /* All active scans will be done with either a resolvable
3793 * private address (when privacy feature has been enabled)
3794 * or unresolvable private address.
3796 err
= hci_update_random_address(req
, true, &own_addr_type
);
3798 *status
= MGMT_STATUS_FAILED
;
3802 param_cp
.type
= LE_SCAN_ACTIVE
;
3803 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3804 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3805 param_cp
.own_address_type
= own_addr_type
;
3806 hci_req_add(req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3809 memset(&enable_cp
, 0, sizeof(enable_cp
));
3810 enable_cp
.enable
= LE_SCAN_ENABLE
;
3811 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3812 hci_req_add(req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3817 *status
= MGMT_STATUS_INVALID_PARAMS
;
3824 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3826 struct pending_cmd
*cmd
;
3827 unsigned long timeout
;
3829 BT_DBG("status %d", status
);
3833 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3835 cmd
= mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
3838 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3839 mgmt_pending_remove(cmd
);
3843 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3847 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3849 switch (hdev
->discovery
.type
) {
3850 case DISCOV_TYPE_LE
:
3851 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
3853 case DISCOV_TYPE_INTERLEAVED
:
3854 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
3856 case DISCOV_TYPE_BREDR
:
3860 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3866 queue_delayed_work(hdev
->workqueue
,
3867 &hdev
->le_scan_disable
, timeout
);
3870 hci_dev_unlock(hdev
);
3873 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3874 void *data
, u16 len
)
3876 struct mgmt_cp_start_discovery
*cp
= data
;
3877 struct pending_cmd
*cmd
;
3878 struct hci_request req
;
3882 BT_DBG("%s", hdev
->name
);
3886 if (!hdev_is_powered(hdev
)) {
3887 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3888 MGMT_STATUS_NOT_POWERED
,
3889 &cp
->type
, sizeof(cp
->type
));
3893 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3894 test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3895 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3896 MGMT_STATUS_BUSY
, &cp
->type
,
3901 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, data
, len
);
3907 cmd
->cmd_complete
= generic_cmd_complete
;
3909 /* Clear the discovery filter first to free any previously
3910 * allocated memory for the UUID list.
3912 hci_discovery_filter_clear(hdev
);
3914 hdev
->discovery
.type
= cp
->type
;
3915 hdev
->discovery
.report_invalid_rssi
= false;
3917 hci_req_init(&req
, hdev
);
3919 if (!trigger_discovery(&req
, &status
)) {
3920 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3921 status
, &cp
->type
, sizeof(cp
->type
));
3922 mgmt_pending_remove(cmd
);
3926 err
= hci_req_run(&req
, start_discovery_complete
);
3928 mgmt_pending_remove(cmd
);
3932 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3935 hci_dev_unlock(hdev
);
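/* Service discovery responses echo back only the first parameter byte,
 * i.e. the discovery type from the original command.
 */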
static void service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
{
    cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1);
}
3944 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3945 void *data
, u16 len
)
3947 struct mgmt_cp_start_service_discovery
*cp
= data
;
3948 struct pending_cmd
*cmd
;
3949 struct hci_request req
;
3950 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
3951 u16 uuid_count
, expected_len
;
3955 BT_DBG("%s", hdev
->name
);
3959 if (!hdev_is_powered(hdev
)) {
3960 err
= cmd_complete(sk
, hdev
->id
,
3961 MGMT_OP_START_SERVICE_DISCOVERY
,
3962 MGMT_STATUS_NOT_POWERED
,
3963 &cp
->type
, sizeof(cp
->type
));
3967 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
3968 test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3969 err
= cmd_complete(sk
, hdev
->id
,
3970 MGMT_OP_START_SERVICE_DISCOVERY
,
3971 MGMT_STATUS_BUSY
, &cp
->type
,
3976 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
3977 if (uuid_count
> max_uuid_count
) {
3978 BT_ERR("service_discovery: too big uuid_count value %u",
3980 err
= cmd_complete(sk
, hdev
->id
,
3981 MGMT_OP_START_SERVICE_DISCOVERY
,
3982 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3987 expected_len
= sizeof(*cp
) + uuid_count
* 16;
3988 if (expected_len
!= len
) {
3989 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
3991 err
= cmd_complete(sk
, hdev
->id
,
3992 MGMT_OP_START_SERVICE_DISCOVERY
,
3993 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
3998 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4005 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4007 /* Clear the discovery filter first to free any previously
4008 * allocated memory for the UUID list.
4010 hci_discovery_filter_clear(hdev
);
4012 hdev
->discovery
.type
= cp
->type
;
4013 hdev
->discovery
.rssi
= cp
->rssi
;
4014 hdev
->discovery
.uuid_count
= uuid_count
;
4016 if (uuid_count
> 0) {
4017 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4019 if (!hdev
->discovery
.uuids
) {
4020 err
= cmd_complete(sk
, hdev
->id
,
4021 MGMT_OP_START_SERVICE_DISCOVERY
,
4023 &cp
->type
, sizeof(cp
->type
));
4024 mgmt_pending_remove(cmd
);
4029 hci_req_init(&req
, hdev
);
4031 if (!trigger_discovery(&req
, &status
)) {
4032 err
= cmd_complete(sk
, hdev
->id
,
4033 MGMT_OP_START_SERVICE_DISCOVERY
,
4034 status
, &cp
->type
, sizeof(cp
->type
));
4035 mgmt_pending_remove(cmd
);
4039 err
= hci_req_run(&req
, start_discovery_complete
);
4041 mgmt_pending_remove(cmd
);
4045 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4048 hci_dev_unlock(hdev
);
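/* Called when the HCI request built for Stop Discovery has completed. */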
static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
    struct pending_cmd *cmd;

    BT_DBG("status %d", status);

    hci_dev_lock(hdev);

    cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
    if (cmd) {
        cmd->cmd_complete(cmd, mgmt_status(status));
        mgmt_pending_remove(cmd);
    }

    if (!status)
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

    hci_dev_unlock(hdev);
}
4072 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4075 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4076 struct pending_cmd
*cmd
;
4077 struct hci_request req
;
4080 BT_DBG("%s", hdev
->name
);
4084 if (!hci_discovery_active(hdev
)) {
4085 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4086 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4087 sizeof(mgmt_cp
->type
));
4091 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4092 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4093 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
4094 sizeof(mgmt_cp
->type
));
4098 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4104 cmd
->cmd_complete
= generic_cmd_complete
;
4106 hci_req_init(&req
, hdev
);
4108 hci_stop_discovery(&req
);
4110 err
= hci_req_run(&req
, stop_discovery_complete
);
4112 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4116 mgmt_pending_remove(cmd
);
4118 /* If no HCI commands were sent we're done */
4119 if (err
== -ENODATA
) {
4120 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
4121 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4122 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
4126 hci_dev_unlock(hdev
);
4130 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4133 struct mgmt_cp_confirm_name
*cp
= data
;
4134 struct inquiry_entry
*e
;
4137 BT_DBG("%s", hdev
->name
);
4141 if (!hci_discovery_active(hdev
)) {
4142 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4143 MGMT_STATUS_FAILED
, &cp
->addr
,
4148 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4150 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4151 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4156 if (cp
->name_known
) {
4157 e
->name_state
= NAME_KNOWN
;
4160 e
->name_state
= NAME_NEEDED
;
4161 hci_inquiry_cache_update_resolve(hdev
, e
);
4164 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
4168 hci_dev_unlock(hdev
);
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
                        u16 len)
{
    struct mgmt_cp_block_device *cp = data;
    u8 status;
    int err;

    BT_DBG("%s", hdev->name);

    if (!bdaddr_type_is_valid(cp->addr.type))
        return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
                            MGMT_STATUS_INVALID_PARAMS,
                            &cp->addr, sizeof(cp->addr));

    hci_dev_lock(hdev);

    err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
                              cp->addr.type);
    if (err < 0) {
        status = MGMT_STATUS_FAILED;
        goto done;
    }

    mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
               sk);
    status = MGMT_STATUS_SUCCESS;

done:
    err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
                       &cp->addr, sizeof(cp->addr));

    hci_dev_unlock(hdev);

    return err;
}
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
    struct mgmt_cp_unblock_device *cp = data;
    u8 status;
    int err;

    BT_DBG("%s", hdev->name);

    if (!bdaddr_type_is_valid(cp->addr.type))
        return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
                            MGMT_STATUS_INVALID_PARAMS,
                            &cp->addr, sizeof(cp->addr));

    hci_dev_lock(hdev);

    err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
                              cp->addr.type);
    if (err < 0) {
        status = MGMT_STATUS_INVALID_PARAMS;
        goto done;
    }

    mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
               sk);
    status = MGMT_STATUS_SUCCESS;

done:
    err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
                       &cp->addr, sizeof(cp->addr));

    hci_dev_unlock(hdev);

    return err;
}
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 len)
{
    struct mgmt_cp_set_device_id *cp = data;
    struct hci_request req;
    int err;
    __u16 source;

    BT_DBG("%s", hdev->name);

    source = __le16_to_cpu(cp->source);

    if (source > 0x0002)
        return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
                          MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    hdev->devid_source = source;
    hdev->devid_vendor = __le16_to_cpu(cp->vendor);
    hdev->devid_product = __le16_to_cpu(cp->product);
    hdev->devid_version = __le16_to_cpu(cp->version);

    err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

    hci_req_init(&req, hdev);
    update_eir(&req);
    hci_req_run(&req, NULL);

    hci_dev_unlock(hdev);

    return err;
}
4278 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
4280 struct cmd_lookup match
= { NULL
, hdev
};
4283 u8 mgmt_err
= mgmt_status(status
);
4285 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4286 cmd_status_rsp
, &mgmt_err
);
4290 if (test_bit(HCI_LE_ADV
, &hdev
->dev_flags
))
4291 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4293 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4295 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4298 new_settings(hdev
, match
.sk
);
4304 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4307 struct mgmt_mode
*cp
= data
;
4308 struct pending_cmd
*cmd
;
4309 struct hci_request req
;
4310 u8 val
, enabled
, status
;
4313 BT_DBG("request for %s", hdev
->name
);
4315 status
= mgmt_le_support(hdev
);
4317 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4320 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4321 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4322 MGMT_STATUS_INVALID_PARAMS
);
4327 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4329 /* The following conditions are ones which mean that we should
4330 * not do any HCI communication but directly send a mgmt
4331 * response to user space (after toggling the flag if
4334 if (!hdev_is_powered(hdev
) || val
== enabled
||
4335 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4336 (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
) &&
4337 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4338 bool changed
= false;
4340 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
4341 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4345 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4350 err
= new_settings(hdev
, sk
);
4355 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4356 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
4357 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4362 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4368 hci_req_init(&req
, hdev
);
4371 enable_advertising(&req
);
4373 disable_advertising(&req
);
4375 err
= hci_req_run(&req
, set_advertising_complete
);
4377 mgmt_pending_remove(cmd
);
4380 hci_dev_unlock(hdev
);
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
                              void *data, u16 len)
{
    struct mgmt_cp_set_static_address *cp = data;
    int err;

    BT_DBG("%s", hdev->name);

    if (!lmp_le_capable(hdev))
        return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
                          MGMT_STATUS_NOT_SUPPORTED);

    if (hdev_is_powered(hdev))
        return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
                          MGMT_STATUS_REJECTED);

    if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
        if (!bacmp(&cp->bdaddr, BDADDR_NONE))
            return cmd_status(sk, hdev->id,
                              MGMT_OP_SET_STATIC_ADDRESS,
                              MGMT_STATUS_INVALID_PARAMS);

        /* Two most significant bits shall be set */
        if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
            return cmd_status(sk, hdev->id,
                              MGMT_OP_SET_STATIC_ADDRESS,
                              MGMT_STATUS_INVALID_PARAMS);
    }

    hci_dev_lock(hdev);

    bacpy(&hdev->static_addr, &cp->bdaddr);

    err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

    hci_dev_unlock(hdev);

    return err;
}
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
    struct mgmt_cp_set_scan_params *cp = data;
    __u16 interval, window;
    int err;

    BT_DBG("%s", hdev->name);

    if (!lmp_le_capable(hdev))
        return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                          MGMT_STATUS_NOT_SUPPORTED);

    interval = __le16_to_cpu(cp->interval);

    if (interval < 0x0004 || interval > 0x4000)
        return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                          MGMT_STATUS_INVALID_PARAMS);

    window = __le16_to_cpu(cp->window);

    if (window < 0x0004 || window > 0x4000)
        return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                          MGMT_STATUS_INVALID_PARAMS);

    if (window > interval)
        return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                          MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    hdev->le_scan_interval = interval;
    hdev->le_scan_window = window;

    err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

    /* If background scan is running, restart it so new parameters are
     * loaded.
     */
    if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
        hdev->discovery.state == DISCOVERY_STOPPED) {
        struct hci_request req;

        hci_req_init(&req, hdev);

        hci_req_add_le_scan_disable(&req);
        hci_req_add_le_passive_scan(&req);

        hci_req_run(&req, NULL);
    }

    hci_dev_unlock(hdev);

    return err;
}
4480 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
4482 struct pending_cmd
*cmd
;
4484 BT_DBG("status 0x%02x", status
);
4488 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4493 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4494 mgmt_status(status
));
4496 struct mgmt_mode
*cp
= cmd
->param
;
4499 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4501 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4503 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4504 new_settings(hdev
, cmd
->sk
);
4507 mgmt_pending_remove(cmd
);
4510 hci_dev_unlock(hdev
);
4513 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4514 void *data
, u16 len
)
4516 struct mgmt_mode
*cp
= data
;
4517 struct pending_cmd
*cmd
;
4518 struct hci_request req
;
4521 BT_DBG("%s", hdev
->name
);
4523 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
4524 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4525 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4526 MGMT_STATUS_NOT_SUPPORTED
);
4528 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4529 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4530 MGMT_STATUS_INVALID_PARAMS
);
4532 if (!hdev_is_powered(hdev
))
4533 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4534 MGMT_STATUS_NOT_POWERED
);
4536 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4537 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4538 MGMT_STATUS_REJECTED
);
4542 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4543 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4548 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
4549 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4554 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4561 hci_req_init(&req
, hdev
);
4563 write_fast_connectable(&req
, cp
->val
);
4565 err
= hci_req_run(&req
, fast_connectable_complete
);
4567 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4568 MGMT_STATUS_FAILED
);
4569 mgmt_pending_remove(cmd
);
4573 hci_dev_unlock(hdev
);
4578 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
4580 struct pending_cmd
*cmd
;
4582 BT_DBG("status 0x%02x", status
);
4586 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4591 u8 mgmt_err
= mgmt_status(status
);
4593 /* We need to restore the flag if related HCI commands
4596 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4598 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4600 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4601 new_settings(hdev
, cmd
->sk
);
4604 mgmt_pending_remove(cmd
);
4607 hci_dev_unlock(hdev
);
4610 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4612 struct mgmt_mode
*cp
= data
;
4613 struct pending_cmd
*cmd
;
4614 struct hci_request req
;
4617 BT_DBG("request for %s", hdev
->name
);
4619 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4620 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4621 MGMT_STATUS_NOT_SUPPORTED
);
4623 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4624 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4625 MGMT_STATUS_REJECTED
);
4627 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4628 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4629 MGMT_STATUS_INVALID_PARAMS
);
4633 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4634 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4638 if (!hdev_is_powered(hdev
)) {
4640 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4641 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4642 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4643 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4644 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4647 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4649 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4653 err
= new_settings(hdev
, sk
);
4657 /* Reject disabling when powered on */
4659 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4660 MGMT_STATUS_REJECTED
);
4664 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4665 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4670 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4676 /* We need to flip the bit already here so that update_adv_data
4677 * generates the correct flags.
4679 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4681 hci_req_init(&req
, hdev
);
4683 write_fast_connectable(&req
, false);
4684 hci_update_page_scan(hdev
, &req
);
4686 /* Since only the advertising data flags will change, there
4687 * is no need to update the scan response data.
4689 update_adv_data(&req
);
4691 err
= hci_req_run(&req
, set_bredr_complete
);
4693 mgmt_pending_remove(cmd
);
4696 hci_dev_unlock(hdev
);
4700 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4701 void *data
, u16 len
)
4703 struct mgmt_mode
*cp
= data
;
4704 struct pending_cmd
*cmd
;
4708 BT_DBG("request for %s", hdev
->name
);
4710 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
4711 !lmp_sc_capable(hdev
) && !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
4712 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4713 MGMT_STATUS_NOT_SUPPORTED
);
4715 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4716 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4717 MGMT_STATUS_INVALID_PARAMS
);
4721 if (!hdev_is_powered(hdev
) ||
4722 (!lmp_sc_capable(hdev
) &&
4723 !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
)) ||
4724 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4728 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4730 if (cp
->val
== 0x02)
4731 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4733 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4735 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4737 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4740 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4745 err
= new_settings(hdev
, sk
);
4750 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4751 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4758 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4759 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4760 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4764 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4770 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4772 mgmt_pending_remove(cmd
);
4776 if (cp
->val
== 0x02)
4777 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4779 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4782 hci_dev_unlock(hdev
);
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
                          void *data, u16 len)
{
    struct mgmt_mode *cp = data;
    bool changed, use_changed;
    int err;

    BT_DBG("request for %s", hdev->name);

    if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
        return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
                          MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    if (cp->val)
        changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
                                    &hdev->dev_flags);
    else
        changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
                                     &hdev->dev_flags);

    if (cp->val == 0x02)
        use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
                                        &hdev->dev_flags);
    else
        use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
                                         &hdev->dev_flags);

    if (hdev_is_powered(hdev) && use_changed &&
        test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
        u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
        hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
                     sizeof(mode), &mode);
    }

    err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
    if (err < 0)
        goto unlock;

    if (changed)
        err = new_settings(hdev, sk);

unlock:
    hci_dev_unlock(hdev);
    return err;
}
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                       u16 len)
{
    struct mgmt_cp_set_privacy *cp = cp_data;
    bool changed;
    int err;

    BT_DBG("request for %s", hdev->name);

    if (!lmp_le_capable(hdev))
        return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                          MGMT_STATUS_NOT_SUPPORTED);

    if (cp->privacy != 0x00 && cp->privacy != 0x01)
        return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                          MGMT_STATUS_INVALID_PARAMS);

    if (hdev_is_powered(hdev))
        return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
                          MGMT_STATUS_REJECTED);

    hci_dev_lock(hdev);

    /* If user space supports this command it is also expected to
     * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
     */
    set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

    if (cp->privacy) {
        changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
        memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
        set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
    } else {
        changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
        memset(hdev->irk, 0, sizeof(hdev->irk));
        clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
    }

    err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
    if (err < 0)
        goto unlock;

    if (changed)
        err = new_settings(hdev, sk);

unlock:
    hci_dev_unlock(hdev);
    return err;
}
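/* An IRK entry is only valid for an LE public address or for an LE
 * random address of the static type (two most significant bits set).
 */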
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
    switch (irk->addr.type) {
    case BDADDR_LE_PUBLIC:
        return true;

    case BDADDR_LE_RANDOM:
        /* Two most significant bits shall be set */
        if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
            return false;
        return true;
    }

    return false;
}
4900 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4903 struct mgmt_cp_load_irks
*cp
= cp_data
;
4904 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
4905 sizeof(struct mgmt_irk_info
));
4906 u16 irk_count
, expected_len
;
4909 BT_DBG("request for %s", hdev
->name
);
4911 if (!lmp_le_capable(hdev
))
4912 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4913 MGMT_STATUS_NOT_SUPPORTED
);
4915 irk_count
= __le16_to_cpu(cp
->irk_count
);
4916 if (irk_count
> max_irk_count
) {
4917 BT_ERR("load_irks: too big irk_count value %u", irk_count
);
4918 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4919 MGMT_STATUS_INVALID_PARAMS
);
4922 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4923 if (expected_len
!= len
) {
4924 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4926 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4927 MGMT_STATUS_INVALID_PARAMS
);
4930 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4932 for (i
= 0; i
< irk_count
; i
++) {
4933 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4935 if (!irk_is_valid(key
))
4936 return cmd_status(sk
, hdev
->id
,
4938 MGMT_STATUS_INVALID_PARAMS
);
4943 hci_smp_irks_clear(hdev
);
4945 for (i
= 0; i
< irk_count
; i
++) {
4946 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4949 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4950 addr_type
= ADDR_LE_DEV_PUBLIC
;
4952 addr_type
= ADDR_LE_DEV_RANDOM
;
4954 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4958 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4960 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4962 hci_dev_unlock(hdev
);
static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
    if (key->master != 0x00 && key->master != 0x01)
        return false;

    switch (key->addr.type) {
    case BDADDR_LE_PUBLIC:
        return true;

    case BDADDR_LE_RANDOM:
        /* Two most significant bits shall be set */
        if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
            return false;
        return true;
    }

    return false;
}
4986 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4987 void *cp_data
, u16 len
)
4989 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4990 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
4991 sizeof(struct mgmt_ltk_info
));
4992 u16 key_count
, expected_len
;
4995 BT_DBG("request for %s", hdev
->name
);
4997 if (!lmp_le_capable(hdev
))
4998 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4999 MGMT_STATUS_NOT_SUPPORTED
);
5001 key_count
= __le16_to_cpu(cp
->key_count
);
5002 if (key_count
> max_key_count
) {
5003 BT_ERR("load_ltks: too big key_count value %u", key_count
);
5004 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5005 MGMT_STATUS_INVALID_PARAMS
);
5008 expected_len
= sizeof(*cp
) + key_count
*
5009 sizeof(struct mgmt_ltk_info
);
5010 if (expected_len
!= len
) {
5011 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5013 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5014 MGMT_STATUS_INVALID_PARAMS
);
5017 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
5019 for (i
= 0; i
< key_count
; i
++) {
5020 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5022 if (!ltk_is_valid(key
))
5023 return cmd_status(sk
, hdev
->id
,
5024 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5025 MGMT_STATUS_INVALID_PARAMS
);
5030 hci_smp_ltks_clear(hdev
);
5032 for (i
= 0; i
< key_count
; i
++) {
5033 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5034 u8 type
, addr_type
, authenticated
;
5036 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
5037 addr_type
= ADDR_LE_DEV_PUBLIC
;
5039 addr_type
= ADDR_LE_DEV_RANDOM
;
5041 switch (key
->type
) {
5042 case MGMT_LTK_UNAUTHENTICATED
:
5043 authenticated
= 0x00;
5044 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5046 case MGMT_LTK_AUTHENTICATED
:
5047 authenticated
= 0x01;
5048 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5050 case MGMT_LTK_P256_UNAUTH
:
5051 authenticated
= 0x00;
5052 type
= SMP_LTK_P256
;
5054 case MGMT_LTK_P256_AUTH
:
5055 authenticated
= 0x01;
5056 type
= SMP_LTK_P256
;
5058 case MGMT_LTK_P256_DEBUG
:
5059 authenticated
= 0x00;
5060 type
= SMP_LTK_P256_DEBUG
;
5065 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
5066 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
5070 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
5073 hci_dev_unlock(hdev
);
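/* Build the Get Connection Info response from the values cached in the
 * hci_conn, or report invalid RSSI/TX power values on failure.
 */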
static void conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
    struct hci_conn *conn = cmd->user_data;
    struct mgmt_rp_get_conn_info rp;

    memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

    if (status == MGMT_STATUS_SUCCESS) {
        rp.rssi = conn->rssi;
        rp.tx_power = conn->tx_power;
        rp.max_tx_power = conn->max_tx_power;
    } else {
        rp.rssi = HCI_RSSI_INVALID;
        rp.tx_power = HCI_TX_POWER_INVALID;
        rp.max_tx_power = HCI_TX_POWER_INVALID;
    }

    cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
                 &rp, sizeof(rp));

    hci_conn_drop(conn);
}
5102 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
)
5104 struct hci_cp_read_rssi
*cp
;
5105 struct pending_cmd
*cmd
;
5106 struct hci_conn
*conn
;
5110 BT_DBG("status 0x%02x", hci_status
);
5114 /* Commands sent in request are either Read RSSI or Read Transmit Power
5115 * Level so we check which one was last sent to retrieve connection
5116 * handle. Both commands have handle as first parameter so it's safe to
5117 * cast data on the same command struct.
5119 * First command sent is always Read RSSI and we fail only if it fails.
5120 * In other case we simply override error to indicate success as we
5121 * already remembered if TX power value is actually valid.
5123 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
5125 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
5126 status
= MGMT_STATUS_SUCCESS
;
5128 status
= mgmt_status(hci_status
);
5132 BT_ERR("invalid sent_cmd in conn_info response");
5136 handle
= __le16_to_cpu(cp
->handle
);
5137 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5139 BT_ERR("unknown handle (%d) in conn_info response", handle
);
5143 cmd
= mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
5147 cmd
->cmd_complete(cmd
, status
);
5148 mgmt_pending_remove(cmd
);
5151 hci_dev_unlock(hdev
);
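/* Get Connection Info either answers immediately from values cached in the
 * hci_conn (if they are younger than a randomized window between
 * conn_info_min_age and conn_info_max_age) or queues Read RSSI / Read
 * Transmit Power Level commands and defers the reply to
 * conn_info_refresh_complete().
 */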
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

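/* Completion handler for Get Clock Information: fills in the local clock
 * and, when a connection was given, the piconet clock and accuracy before
 * replying to the pending command.
 */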
static void clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}
}

static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

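/* Get Clock Information is BR/EDR only. It always reads the local clock and
 * additionally the piconet clock when a connected peer address is supplied;
 * the reply is sent from clock_info_cmd_complete() once the request finishes.
 */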
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

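/* Add Device: action 0x01 on a BR/EDR address whitelists the device for
 * incoming connections, while LE addresses are stored as connection
 * parameters with an auto-connect policy derived from the action value
 * (0x00 report, 0x01 direct, 0x02 always).
 */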
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_update_page_scan(hdev, NULL);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

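/* Remove Device undoes Add Device: a specific address is dropped from the
 * BR/EDR whitelist or its LE connection parameters are deleted, while
 * BDADDR_ANY clears the whole whitelist and every non-disabled LE entry.
 */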
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr,
						   sizeof(cp->addr));
				goto unlock;
			}

			hci_update_page_scan(hdev, NULL);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev, NULL);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

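/* Load Connection Parameters replaces the stored LE connection parameter
 * list: the payload length is checked against param_count, invalid entries
 * are skipped with an error message, and valid ones are added through
 * hci_conn_params_add().
 */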
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}

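/* Set External Configuration is only accepted while the controller is
 * powered off and only when HCI_QUIRK_EXTERNAL_CONFIG is set; a change may
 * move the controller between the configured and unconfigured index lists.
 */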
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

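/* Table of command handlers indexed by management opcode. var_len marks
 * commands whose payload may be larger than data_len (e.g. key lists);
 * fixed-size commands must match data_len exactly.
 */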
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            false, MGMT_READ_VERSION_SIZE },
	{ read_commands,           false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,         false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,    false, MGMT_READ_INFO_SIZE },
	{ set_powered,             false, MGMT_SETTING_SIZE },
	{ set_discoverable,        false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,    false, MGMT_SETTING_SIZE },
	{ set_bondable,            false, MGMT_SETTING_SIZE },
	{ set_link_security,       false, MGMT_SETTING_SIZE },
	{ set_ssp,                 false, MGMT_SETTING_SIZE },
	{ set_hs,                  false, MGMT_SETTING_SIZE },
	{ set_le,                  false, MGMT_SETTING_SIZE },
	{ set_dev_class,           false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,     true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,              false, MGMT_DISCONNECT_SIZE },
	{ get_connections,         false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data,  false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         false, MGMT_SETTING_SIZE },
	{ set_bredr,               false, MGMT_SETTING_SIZE },
	{ set_static_address,      false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         false, MGMT_SETTING_SIZE },
	{ set_debug_keys,          false, MGMT_SETTING_SIZE },
	{ set_privacy,             false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,           false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list,  false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,        false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,     false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,      false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
	{ start_service_discovery, true,  MGMT_START_SERVICE_DISCOVERY_SIZE },
};

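/* Entry point for management commands received on an HCI control socket:
 * validates the header, resolves the controller index, checks the payload
 * size against the handler table and dispatches to the matching handler.
 */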
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	kfree(buf);

	if (hdev)
		hci_dev_put(hdev);

	return err;
}

void mgmt_index_added(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
	else
		mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)
		return;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
	else
		mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	hci_update_background_scan(hdev);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		hci_update_page_scan(hdev, &req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

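/* Called on controller power state transitions. On power-on the controller
 * is brought in line with the settings configured over mgmt (SSP, LE host
 * support, advertising, link security and BR/EDR state); on power-off all
 * pending commands are failed with "not powered" and a zero class of device
 * is signalled if needed.
 */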
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct pending_cmd *cmd;
	u8 status;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}

	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_SLAVE:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0,
						  EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd)
		pairing_complete(cmd, status);
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}

void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}

void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}

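/* Helpers for Start Service Discovery filtering: has_uuid() checks a single
 * 128-bit UUID against the filter list and eir_has_uuids() walks EIR or
 * advertising data, expanding 16-bit and 32-bit UUIDs to 128-bit form with
 * the Bluetooth base UUID before comparing.
 */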
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

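/* Builds and sends a Device Found event. Results can be suppressed when no
 * kernel-initiated discovery is active, when they fall below a configured
 * RSSI threshold, or when service discovery UUID filters match neither the
 * advertising data nor the scan response.
 */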
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;
	bool match;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* When using service discovery with a RSSI threshold, then check
	 * if such a RSSI threshold is specified. If a RSSI threshold has
	 * been specified, then all results with a RSSI smaller than the
	 * RSSI threshold will be dropped.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped. In case there is a match the result is
		 * kept and checking possible scan response data
		 * will be skipped.
		 */
		if (hdev->discovery.uuid_count > 0) {
			match = eir_has_uuids(eir, eir_len,
					      hdev->discovery.uuid_count,
					      hdev->discovery.uuids);
			if (!match)
				return;
		}

		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty EIR or advertising data
		 * should be dropped since they do not match any UUID.
		 */
		if (hdev->discovery.uuid_count > 0)
			return;
	}

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0) {
		/* When using service discovery and a list of UUID is
		 * provided, results with no matching UUID should be
		 * dropped if there is no previous match from the
		 * advertising data.
		 */
		if (hdev->discovery.uuid_count > 0) {
			if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
						     hdev->discovery.uuid_count,
						     hdev->discovery.uuids))
				return;
		}

		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	} else {
		/* When using service discovery and a list of UUID is
		 * provided, results with empty scan response and no
		 * previous matched advertising data should be dropped.
		 */
		if (hdev->discovery.uuid_count > 0 && !match)
			return;
	}

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);