/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#define MGMT_VERSION	1
#define MGMT_REVISION	6
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_GET_CONN_INFO,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
};
/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}
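/* A minimal usage sketch (illustrative, not part of the original file): the
 * table above is indexed directly by the HCI status byte, so HCI status 0x05
 * (Authentication Failure) maps to MGMT_STATUS_AUTH_FAILED, while any value
 * beyond the end of the table falls back to MGMT_STATUS_FAILED:
 *
 *	u8 a = mgmt_status(0x05);	// MGMT_STATUS_AUTH_FAILED
 *	u8 b = mgmt_status(0xff);	// MGMT_STATUS_FAILED (out of range)
 */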
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
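/* Illustrative sketch (not part of the original file): the current settings
 * are reported to user space as a little-endian 32-bit bitmask, so a powered,
 * connectable, pairable BR/EDR controller with SSP enabled would report
 * something like:
 *
 *	u32 settings = MGMT_SETTING_POWERED | MGMT_SETTING_CONNECTABLE |
 *		       MGMT_SETTING_PAIRABLE | MGMT_SETTING_BREDR |
 *		       MGMT_SETTING_SSP;
 *	__le32 ev = cpu_to_le32(settings);	// as carried in MGMT_EV_NEW_SETTINGS
 */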
#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
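/* Illustrative sketch (not part of the original file): the helpers above emit
 * standard EIR structures of the form [length, type, payload]. A device whose
 * only 16-bit service UUIDs are 0x110a and 0x110b would end up with:
 *
 *	u8 eir[] = { 0x05, EIR_UUID16_ALL, 0x0a, 0x11, 0x0b, 0x11 };
 *
 * i.e. a length byte of 5, the "complete 16-bit UUID list" tag and the two
 * UUIDs in little-endian byte order.
 */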
static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}

static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
static bool get_connectable(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		return cp->val;
	}

	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
}

static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
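/* Worked example (illustrative, not part of the original file): LE
 * advertising intervals are expressed in units of 0.625 ms, so the 0x0800
 * used above corresponds to
 *
 *	0x0800 * 0.625 ms = 2048 * 0.625 ms = 1280 ms
 *
 * i.e. one advertising event roughly every 1.28 seconds.
 */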
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}

static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}
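/* Illustrative sketch (not part of the original file): every event emitted
 * through mgmt_event() starts with the same fixed header, followed by the
 * event-specific payload. For MGMT_EV_NEW_SETTINGS on hci0 the header put in
 * front of the 4-byte settings mask would be filled in roughly as:
 *
 *	struct mgmt_hdr hdr = {
 *		.opcode = cpu_to_le16(MGMT_EV_NEW_SETTINGS),
 *		.index  = cpu_to_le16(0),		// hci0
 *		.len    = cpu_to_le16(sizeof(__le32)),
 *	};
 */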
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
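/* Worked example (illustrative, not part of the original file): page scan
 * activity values are in 0.625 ms units, so the two intervals used above are
 *
 *	0x0100 * 0.625 ms =  160 ms	(fast connectable)
 *	0x0800 * 0.625 ms = 1280 ms	(default)
 *
 * with a fixed window of 0x0012 * 0.625 ms = 11.25 ms in both cases.
 */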
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed)
		return new_settings(hdev, sk);

	return 0;
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
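/* Illustrative sketch (not part of the original file): a 16-bit UUID such as
 * 0x110b (Audio Sink) expands against the Bluetooth base UUID
 * 00000000-0000-1000-8000-00805f9b34fb to
 *
 *	0000110b-0000-1000-8000-00805f9b34fb
 *
 * Since UUIDs are stored here in little-endian byte order, bytes 0-11 match
 * bluetooth_base_uuid above and bytes 12-15 hold 0x0000110b, so
 * get_uuid_size() reports 16 for it.
 */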
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
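/* Worked example (illustrative, not part of the original file): the length
 * check above means a MGMT_OP_LOAD_LINK_KEYS request carrying two keys must
 * be exactly
 *
 *	sizeof(struct mgmt_cp_load_link_keys) +
 *		2 * sizeof(struct mgmt_link_key_info)
 *
 * bytes long; anything else is rejected with MGMT_STATUS_INVALID_PARAMS
 * before any key is touched.
 */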
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_rp_get_connections *rp;

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);

	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);

	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,

	hci_dev_unlock(hdev);
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
	struct pending_cmd *cmd;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
		mgmt_pending_remove(cmd);
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_io_capability *cp = data;

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
static struct pending_cmd *find_pairing(struct hci_conn *conn)
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
		if (cmd->user_data != conn)

static void pairing_complete(struct pending_cmd *cmd, u8 status)
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct pending_cmd *cmd;

	cmd = find_pairing(conn);
		pairing_complete(cmd, status);

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
		BT_DBG("Unable to find a pending command");
		pairing_complete(cmd, mgmt_status(status));

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
		BT_DBG("Unable to find a pending command");
		pairing_complete(cmd, mgmt_status(status));
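
/* Pair Device creates an ACL or LE connection as appropriate for the
 * requested address type and registers the pairing callbacks above so
 * that the pending mgmt command is completed once authentication has
 * finished or the connection is dropped.
 */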
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,

		/* Convert from L2CAP channel address type to HCI address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
		hci_conn_drop(conn);

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	hci_dev_unlock(hdev);
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));

	hci_dev_unlock(hdev);
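
/* Common helper for the user confirmation, user passkey and PIN negative
 * replies: LE responses are routed through SMP while BR/EDR responses are
 * sent to the controller as the corresponding HCI command.
 */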
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP. The hdev lock must be
		 * released as SMP may try to reacquire it for crypto
		 */
		hci_dev_unlock(hdev);
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),

		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_user_confirm_reply *cp = data;

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_user_passkey_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
static void update_name(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);

static void set_name_complete(struct hci_dev *hdev, u8 status)
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);

		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
	struct pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
	BT_DBG("%s ", hdev->name);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
			status = MGMT_STATUS_FAILED;
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
			status = MGMT_STATUS_FAILED;
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);

	hci_dev_unlock(hdev);

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_remove_remote_oob_data *cp = data;

	BT_DBG("%s", hdev->name);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
		status = MGMT_STATUS_INVALID_PARAMS;
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
	struct pending_cmd *cmd;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

static void start_discovery_complete(struct hci_dev *hdev, u8 status)
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
	case DISCOV_TYPE_BREDR:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);

		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
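
/* Start Discovery builds a single HCI request for the requested discovery
 * type: a BR/EDR inquiry, an active LE scan, or both for interleaved
 * discovery, temporarily disabling any background LE scan first.
 */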
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
			mgmt_pending_remove(cmd);

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
			mgmt_pending_remove(cmd);

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
			mgmt_pending_remove(cmd);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),

		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);

	err = hci_req_run(&req, start_discovery_complete);
		mgmt_pending_remove(cmd);
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	hci_dev_unlock(hdev);
static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
	BT_DBG("status %d", status);

		mgmt_stop_discovery_failed(hdev, status);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;

	BT_DBG("%s", hdev->name);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,

	hci_dev_unlock(hdev);
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_block_device *cp = data;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
		status = MGMT_STATUS_FAILED;
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_unblock_device *cp = data;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
		status = MGMT_STATUS_INVALID_PARAMS;
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
	struct cmd_lookup match = { NULL, hdev };

		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,

	new_settings(hdev, match.sk);

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);

			err = new_settings(hdev, sk);

	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);

	hci_req_init(&req, hdev);

		enable_advertising(&req);
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_set_static_address *cp = data;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);

		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
		struct mgmt_mode *cp = cmd->param;

			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static void set_bredr_scan(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

static void set_bredr_complete(struct hci_dev *hdev, u8 status)
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);

		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

	if (!hdev_is_powered(hdev)) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

		err = new_settings(hdev, sk);

	/* Reject disabling when powered on */
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev)) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,

			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
			changed = test_and_clear_bit(HCI_SC_ENABLED,
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

			err = new_settings(hdev, sk);

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,

	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
		mgmt_pending_remove(cmd);

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

	hci_dev_unlock(hdev);
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
	struct mgmt_mode *cp = data;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);

		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
	struct mgmt_cp_set_privacy *cp = cp_data;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);

		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
static bool irk_is_valid(struct mgmt_irk_info *irk)
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
	struct mgmt_cp_load_irks *cp = cp_data;
	u16 irk_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_STATUS_INVALID_PARAMS);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);
static bool ltk_is_valid(struct mgmt_ltk_info *key)
	if (key->master != 0x00 && key->master != 0x01)

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
		       sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
			addr_type = ADDR_LE_DEV_RANDOM;

			type = HCI_SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,

	hci_dev_unlock(hdev);
struct cmd_conn_lookup {
	struct hci_conn *conn;
	bool valid_tx_power;

static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	if (conn != match->conn)

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;

	BT_DBG("status 0x%02x", status);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);

		BT_ERR("invalid sent_cmd in response");

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
		BT_ERR("unknown handle (%d) in response", handle);

	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

	hci_dev_unlock(hdev);
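
/* Get Connection Information returns cached RSSI and TX power values for
 * a connection, refreshing them from the controller first when the cached
 * values have aged out or were never read.
 */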
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);

		err = hci_req_run(&req, conn_info_refresh_complete);

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,

		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	hci_dev_unlock(hdev);
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_pairable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	{ set_privacy, false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
};
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))

	buf = kmalloc(msglen, GFP_KERNEL);

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {

	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);

		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);

		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
void mgmt_index_added(struct hci_dev *hdev)
	if (hdev->dev_type != HCI_BREDR)

	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);

void mgmt_index_removed(struct hci_dev *hdev)
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (hdev->dev_type != HCI_BREDR)

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);

/* This function requires the caller holds hdev->lock */
static void restart_le_auto_conns(struct hci_dev *hdev)
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
static void powered_complete(struct hci_dev *hdev, u8 status)
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);
*hdev
)
4998 struct hci_request req
;
5001 hci_req_init(&req
, hdev
);
5003 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
5004 !lmp_host_ssp_capable(hdev
)) {
5007 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
5010 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
5011 lmp_bredr_capable(hdev
)) {
5012 struct hci_cp_write_le_host_supported cp
;
5015 cp
.simul
= lmp_le_br_capable(hdev
);
5017 /* Check first if we already have the right
5018 * host state (host features set)
5020 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
5021 cp
.simul
!= lmp_host_le_br_capable(hdev
))
5022 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
5026 if (lmp_le_capable(hdev
)) {
5027 /* Make sure the controller has a good default for
5028 * advertising data. This also applies to the case
5029 * where BR/EDR was toggled during the AUTO_OFF phase.
5031 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
5032 update_adv_data(&req
);
5033 update_scan_rsp_data(&req
);
5036 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
5037 enable_advertising(&req
);
5040 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
5041 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
5042 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
5043 sizeof(link_sec
), &link_sec
);
5045 if (lmp_bredr_capable(hdev
)) {
5046 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
5047 set_bredr_scan(&req
);
5053 return hci_req_run(&req
, powered_complete
);
int mgmt_powered(struct hci_dev *hdev, u8 powered)
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))

		if (powered_update_hci(hdev) == 0)

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

	err = new_settings(hdev, match.sk);

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
	struct pending_cmd *cmd;

	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
		status = MGMT_STATUS_FAILED;

	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (connectable)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);
}
void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
{
	/* Powering off may stop advertising - don't let that interfere */
	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (advertising)
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
}
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	if (ltk->authenticated)
		return MGMT_LTK_AUTHENTICATED;

	return MGMT_LTK_UNAUTHENTICATED;
}
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * storing long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
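/* Illustrative helper (hypothetical, not used elsewhere in this file): the
 * store-hint check above keys off the two most significant bits of an LE
 * random address, which live in b[5] because bdaddr_t is stored
 * little-endian.  0b11 marks a static random (identity) address; the
 * resolvable (0b01) and non-resolvable (0b00) private sub-types change over
 * time, so keys tied to them are not worth storing.
 */
static inline bool bdaddr_le_is_static_random(const bdaddr_t *bdaddr)
{
	return (bdaddr->b[5] & 0xc0) == 0xc0;
}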
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will also be loaded on the next boot of the system.
	 * More identity resolving keys means more time during scanning
	 * is needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * storing signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
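/* sk_lookup() takes an extra reference with sock_hold() because the
 * recorded socket outlives the pending command it came from; the caller is
 * expected to drop the reference with sock_put() once the resulting event
 * has been sent.
 */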
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering on the
		 * HCI dev, don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
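/* The "5 extra bytes" of headroom checked in mgmt_device_found() above
 * correspond to one EIR Class of Device field that may be appended to the
 * caller-supplied EIR data: 1 length byte + 1 type byte (EIR_CLASS_OF_DEV)
 * + 3 bytes of class data.
 */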
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}
int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}