/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */
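/* Naming convention used throughout this file: hci_cc_* handlers process
 * Command Complete events for the corresponding HCI command, hci_cs_*
 * handlers process Command Status events, and hci_*_evt handlers process
 * the remaining HCI events.
 */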
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags))
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_conn_check_pending(hdev);
}
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->role = rp->role;

	hci_dev_unlock(hdev);
}
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);

	hdev->link_policy = get_unaligned_le16(sent);
}
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);

	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	param = *((__u8 *) sent);

	hdev->discov_timeout = 0;

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);

	memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);

	if (sent->mode)
		hdev->features[1][0] |= LMP_HOST_SSP;
	else
		hdev->features[1][0] &= ~LMP_HOST_SSP;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (sent->mode)
		hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
	else
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);

	if (sent->support)
		hdev->features[1][0] |= LMP_HOST_SC;
	else
		hdev->features[1][0] &= ~LMP_HOST_SC;

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->flow_ctl_mode = rp->mode;
}
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);

	hdev->page_scan_type = *type;
}
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
	} else {
		conn = hci_conn_hash_lookup_handle(hdev,
						   __le16_to_cpu(rp->handle));
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->inq_tx_power = rp->tx_power;
}
static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);

	hdev->err_data_reporting = cp->err_data_reporting;
}
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	conn->pin_length = cp->pin_len;

	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->adv_tx_power = rp->tx_power;
}
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}
static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv_instance;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);

	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 (Set adv and Directed advs) */
		bacpy(&hdev->random_addr, &cp->bdaddr);
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		bacpy(&adv_instance->random_addr, &cp->bdaddr);
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);

	phy_param = (void *)cp->data;

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
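
/* During active LE scanning an advertising report and its scan response
 * arrive as separate events. The helpers below cache the last report in
 * the discovery state so it can be merged with the matching scan response,
 * or flushed when scanning is disabled.
 */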
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	hdev->le_white_list_size = rp->size;
}

static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);

	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);

	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

	hci_dev_unlock(hdev);
}
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);

	hdev->ssp_debug_mode = *mode;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	set_bit(HCI_INQUIRY, &hdev->flags);
}
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
		}
	}

	hci_dev_unlock(hdev);
}
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
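
/* Decide whether an outgoing connection that is still in the config stage
 * needs authentication before its setup can be completed.
 */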
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
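
/* Send a Remote Name Request for the given inquiry cache entry. */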
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of
	 * which are pending. there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
	if (!e)
		return;

	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
		}
	}

	hci_dev_unlock(hdev);
}
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
}
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		u8 type = conn->type;

		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		if (type == LE_LINK)
			hci_req_reenable_advertising(hdev);
	}

	hci_dev_unlock(hdev);
}
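
/* Common bookkeeping after an LE (Extended) Create Connection command has
 * been issued: record the initiator/responder address pair needed by SMP
 * and arm a connection timeout when the white list is not being used.
 */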
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
			break;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;
			break;
		}
	}

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
2708 static void hci_conn_request_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2710 struct hci_ev_conn_request
*ev
= (void *) skb
->data
;
2711 int mask
= hdev
->link_mode
;
2712 struct inquiry_entry
*ie
;
2713 struct hci_conn
*conn
;
2716 BT_DBG("%s bdaddr %pMR type 0x%x", hdev
->name
, &ev
->bdaddr
,
2719 mask
|= hci_proto_connect_ind(hdev
, &ev
->bdaddr
, ev
->link_type
,
2722 if (!(mask
& HCI_LM_ACCEPT
)) {
2723 hci_reject_conn(hdev
, &ev
->bdaddr
);
2727 if (hci_bdaddr_list_lookup(&hdev
->blacklist
, &ev
->bdaddr
,
2729 hci_reject_conn(hdev
, &ev
->bdaddr
);
2733 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2734 * connection. These features are only touched through mgmt so
2735 * only do the checks if HCI_MGMT is set.
2737 if (hci_dev_test_flag(hdev
, HCI_MGMT
) &&
2738 !hci_dev_test_flag(hdev
, HCI_CONNECTABLE
) &&
2739 !hci_bdaddr_list_lookup_with_flags(&hdev
->whitelist
, &ev
->bdaddr
,
2741 hci_reject_conn(hdev
, &ev
->bdaddr
);
2745 /* Connection accepted */
2749 ie
= hci_inquiry_cache_lookup(hdev
, &ev
->bdaddr
);
2751 memcpy(ie
->data
.dev_class
, ev
->dev_class
, 3);
2753 conn
= hci_conn_hash_lookup_ba(hdev
, ev
->link_type
,
2756 conn
= hci_conn_add(hdev
, ev
->link_type
, &ev
->bdaddr
,
2759 bt_dev_err(hdev
, "no memory for new connection");
2760 hci_dev_unlock(hdev
);
2765 memcpy(conn
->dev_class
, ev
->dev_class
, 3);
2767 hci_dev_unlock(hdev
);
2769 if (ev
->link_type
== ACL_LINK
||
2770 (!(flags
& HCI_PROTO_DEFER
) && !lmp_esco_capable(hdev
))) {
2771 struct hci_cp_accept_conn_req cp
;
2772 conn
->state
= BT_CONNECT
;
2774 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
2776 if (lmp_rswitch_capable(hdev
) && (mask
& HCI_LM_MASTER
))
2777 cp
.role
= 0x00; /* Become master */
2779 cp
.role
= 0x01; /* Remain slave */
2781 hci_send_cmd(hdev
, HCI_OP_ACCEPT_CONN_REQ
, sizeof(cp
), &cp
);
2782 } else if (!(flags
& HCI_PROTO_DEFER
)) {
2783 struct hci_cp_accept_sync_conn_req cp
;
2784 conn
->state
= BT_CONNECT
;
2786 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
2787 cp
.pkt_type
= cpu_to_le16(conn
->pkt_type
);
2789 cp
.tx_bandwidth
= cpu_to_le32(0x00001f40);
2790 cp
.rx_bandwidth
= cpu_to_le32(0x00001f40);
2791 cp
.max_latency
= cpu_to_le16(0xffff);
2792 cp
.content_format
= cpu_to_le16(hdev
->voice_setting
);
2793 cp
.retrans_effort
= 0xff;
2795 hci_send_cmd(hdev
, HCI_OP_ACCEPT_SYNC_CONN_REQ
, sizeof(cp
),
2798 conn
->state
= BT_CONNECT2
;
2799 hci_connect_cfm(conn
, 0);
static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}
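/*
 * Illustrative sketch (not part of the original file): hci_to_mgmt_reason()
 * collapses the many HCI disconnect reasons into the few buckets that the
 * mgmt interface exposes. A hypothetical caller that only wants to log the
 * coarse category could use it like this.
 */
static inline const char *example_disconn_reason_str(u8 hci_err)
{
	switch (hci_to_mgmt_reason(hci_err)) {
	case MGMT_DEV_DISCONN_TIMEOUT:
		return "supervision timeout";
	case MGMT_DEV_DISCONN_REMOTE:
		return "terminated by remote";
	case MGMT_DEV_DISCONN_LOCAL_HOST:
		return "terminated by local host";
	default:
		return "unknown";
	}
}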
2819 static void hci_disconn_complete_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2821 struct hci_ev_disconn_complete
*ev
= (void *) skb
->data
;
2823 struct hci_conn_params
*params
;
2824 struct hci_conn
*conn
;
2825 bool mgmt_connected
;
2828 BT_DBG("%s status 0x%2.2x", hdev
->name
, ev
->status
);
2832 conn
= hci_conn_hash_lookup_handle(hdev
, __le16_to_cpu(ev
->handle
));
2837 mgmt_disconnect_failed(hdev
, &conn
->dst
, conn
->type
,
2838 conn
->dst_type
, ev
->status
);
2842 conn
->state
= BT_CLOSED
;
2844 mgmt_connected
= test_and_clear_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
);
2846 if (test_bit(HCI_CONN_AUTH_FAILURE
, &conn
->flags
))
2847 reason
= MGMT_DEV_DISCONN_AUTH_FAILURE
;
2849 reason
= hci_to_mgmt_reason(ev
->reason
);
2851 mgmt_device_disconnected(hdev
, &conn
->dst
, conn
->type
, conn
->dst_type
,
2852 reason
, mgmt_connected
);
2854 if (conn
->type
== ACL_LINK
) {
2855 if (test_bit(HCI_CONN_FLUSH_KEY
, &conn
->flags
))
2856 hci_remove_link_key(hdev
, &conn
->dst
);
2858 hci_req_update_scan(hdev
);
2861 params
= hci_conn_params_lookup(hdev
, &conn
->dst
, conn
->dst_type
);
2863 switch (params
->auto_connect
) {
2864 case HCI_AUTO_CONN_LINK_LOSS
:
2865 if (ev
->reason
!= HCI_ERROR_CONNECTION_TIMEOUT
)
2869 case HCI_AUTO_CONN_DIRECT
:
2870 case HCI_AUTO_CONN_ALWAYS
:
2871 list_del_init(¶ms
->action
);
2872 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
2873 hci_update_background_scan(hdev
);
2883 hci_disconn_cfm(conn
, ev
->reason
);
2886 /* The suspend notifier is waiting for all devices to disconnect so
2887 * clear the bit from pending tasks and inform the wait queue.
2889 if (list_empty(&hdev
->conn_hash
.list
) &&
2890 test_and_clear_bit(SUSPEND_DISCONNECTING
, hdev
->suspend_tasks
)) {
2891 wake_up(&hdev
->suspend_wait_q
);
2894 /* Re-enable advertising if necessary, since it might
2895 * have been disabled by the connection. From the
2896 * HCI_LE_Set_Advertise_Enable command description in
2897 * the core specification (v4.0):
2898 * "The Controller shall continue advertising until the Host
2899 * issues an LE_Set_Advertise_Enable command with
2900 * Advertising_Enable set to 0x00 (Advertising is disabled)
2901 * or until a connection is created or until the Advertising
2902 * is timed out due to Directed Advertising."
2904 if (type
== LE_LINK
)
2905 hci_req_reenable_advertising(hdev
);
2908 hci_dev_unlock(hdev
);
2911 static void hci_auth_complete_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2913 struct hci_ev_auth_complete
*ev
= (void *) skb
->data
;
2914 struct hci_conn
*conn
;
2916 BT_DBG("%s status 0x%2.2x", hdev
->name
, ev
->status
);
2920 conn
= hci_conn_hash_lookup_handle(hdev
, __le16_to_cpu(ev
->handle
));
2925 clear_bit(HCI_CONN_AUTH_FAILURE
, &conn
->flags
);
2927 if (!hci_conn_ssp_enabled(conn
) &&
2928 test_bit(HCI_CONN_REAUTH_PEND
, &conn
->flags
)) {
2929 bt_dev_info(hdev
, "re-auth of legacy device is not possible.");
2931 set_bit(HCI_CONN_AUTH
, &conn
->flags
);
2932 conn
->sec_level
= conn
->pending_sec_level
;
2935 if (ev
->status
== HCI_ERROR_PIN_OR_KEY_MISSING
)
2936 set_bit(HCI_CONN_AUTH_FAILURE
, &conn
->flags
);
2938 mgmt_auth_failed(conn
, ev
->status
);
2941 clear_bit(HCI_CONN_AUTH_PEND
, &conn
->flags
);
2942 clear_bit(HCI_CONN_REAUTH_PEND
, &conn
->flags
);
2944 if (conn
->state
== BT_CONFIG
) {
2945 if (!ev
->status
&& hci_conn_ssp_enabled(conn
)) {
2946 struct hci_cp_set_conn_encrypt cp
;
2947 cp
.handle
= ev
->handle
;
2949 hci_send_cmd(hdev
, HCI_OP_SET_CONN_ENCRYPT
, sizeof(cp
),
2952 conn
->state
= BT_CONNECTED
;
2953 hci_connect_cfm(conn
, ev
->status
);
2954 hci_conn_drop(conn
);
2957 hci_auth_cfm(conn
, ev
->status
);
2959 hci_conn_hold(conn
);
2960 conn
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
2961 hci_conn_drop(conn
);
2964 if (test_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
)) {
2966 struct hci_cp_set_conn_encrypt cp
;
2967 cp
.handle
= ev
->handle
;
2969 hci_send_cmd(hdev
, HCI_OP_SET_CONN_ENCRYPT
, sizeof(cp
),
2972 clear_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
);
2973 hci_encrypt_cfm(conn
, ev
->status
);
2978 hci_dev_unlock(hdev
);
2981 static void hci_remote_name_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
2983 struct hci_ev_remote_name
*ev
= (void *) skb
->data
;
2984 struct hci_conn
*conn
;
2986 BT_DBG("%s", hdev
->name
);
2988 hci_conn_check_pending(hdev
);
2992 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &ev
->bdaddr
);
2994 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
2997 if (ev
->status
== 0)
2998 hci_check_pending_name(hdev
, conn
, &ev
->bdaddr
, ev
->name
,
2999 strnlen(ev
->name
, HCI_MAX_NAME_LENGTH
));
3001 hci_check_pending_name(hdev
, conn
, &ev
->bdaddr
, NULL
, 0);
3007 if (!hci_outgoing_auth_needed(hdev
, conn
))
3010 if (!test_and_set_bit(HCI_CONN_AUTH_PEND
, &conn
->flags
)) {
3011 struct hci_cp_auth_requested cp
;
3013 set_bit(HCI_CONN_AUTH_INITIATOR
, &conn
->flags
);
3015 cp
.handle
= __cpu_to_le16(conn
->handle
);
3016 hci_send_cmd(hdev
, HCI_OP_AUTH_REQUESTED
, sizeof(cp
), &cp
);
3020 hci_dev_unlock(hdev
);
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}
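/*
 * Illustrative sketch (not part of the original file): conn->enc_key_size
 * filled in above is what upper layers can use to refuse weakly encrypted
 * links. A minimal policy check, with a hypothetical minimum of 7 octets:
 */
#define EXAMPLE_MIN_ENC_KEY_SIZE 7

static inline bool example_enc_key_size_ok(const struct hci_conn *conn)
{
	/* A key size of 0 means the read failed and the link is not trusted */
	return conn->enc_key_size >= EXAMPLE_MIN_ENC_KEY_SIZE;
}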
3064 static void hci_encrypt_change_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3066 struct hci_ev_encrypt_change
*ev
= (void *) skb
->data
;
3067 struct hci_conn
*conn
;
3069 BT_DBG("%s status 0x%2.2x", hdev
->name
, ev
->status
);
3073 conn
= hci_conn_hash_lookup_handle(hdev
, __le16_to_cpu(ev
->handle
));
3079 /* Encryption implies authentication */
3080 set_bit(HCI_CONN_AUTH
, &conn
->flags
);
3081 set_bit(HCI_CONN_ENCRYPT
, &conn
->flags
);
3082 conn
->sec_level
= conn
->pending_sec_level
;
3084 /* P-256 authentication key implies FIPS */
3085 if (conn
->key_type
== HCI_LK_AUTH_COMBINATION_P256
)
3086 set_bit(HCI_CONN_FIPS
, &conn
->flags
);
3088 if ((conn
->type
== ACL_LINK
&& ev
->encrypt
== 0x02) ||
3089 conn
->type
== LE_LINK
)
3090 set_bit(HCI_CONN_AES_CCM
, &conn
->flags
);
3092 clear_bit(HCI_CONN_ENCRYPT
, &conn
->flags
);
3093 clear_bit(HCI_CONN_AES_CCM
, &conn
->flags
);
3097 /* We should disregard the current RPA and generate a new one
3098 * whenever the encryption procedure fails.
3100 if (ev
->status
&& conn
->type
== LE_LINK
) {
3101 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
3102 hci_adv_instances_set_rpa_expired(hdev
, true);
3105 clear_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
);
3107 /* Check link security requirements are met */
3108 if (!hci_conn_check_link_mode(conn
))
3109 ev
->status
= HCI_ERROR_AUTH_FAILURE
;
3111 if (ev
->status
&& conn
->state
== BT_CONNECTED
) {
3112 if (ev
->status
== HCI_ERROR_PIN_OR_KEY_MISSING
)
3113 set_bit(HCI_CONN_AUTH_FAILURE
, &conn
->flags
);
3115 /* Notify upper layers so they can cleanup before
3118 hci_encrypt_cfm(conn
, ev
->status
);
3119 hci_disconnect(conn
, HCI_ERROR_AUTH_FAILURE
);
3120 hci_conn_drop(conn
);
3124 /* Try reading the encryption key size for encrypted ACL links */
3125 if (!ev
->status
&& ev
->encrypt
&& conn
->type
== ACL_LINK
) {
3126 struct hci_cp_read_enc_key_size cp
;
3127 struct hci_request req
;
3129 /* Only send HCI_Read_Encryption_Key_Size if the
3130 * controller really supports it. If it doesn't, assume
3131 * the default size (16).
3133 if (!(hdev
->commands
[20] & 0x10)) {
3134 conn
->enc_key_size
= HCI_LINK_KEY_SIZE
;
3138 hci_req_init(&req
, hdev
);
3140 cp
.handle
= cpu_to_le16(conn
->handle
);
3141 hci_req_add(&req
, HCI_OP_READ_ENC_KEY_SIZE
, sizeof(cp
), &cp
);
3143 if (hci_req_run_skb(&req
, read_enc_key_size_complete
)) {
3144 bt_dev_err(hdev
, "sending read key size failed");
3145 conn
->enc_key_size
= HCI_LINK_KEY_SIZE
;
3152 /* Set the default Authenticated Payload Timeout after
3153 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3154 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3155 * sent when the link is active and Encryption is enabled, the conn
3156 * type can be either LE or ACL and controller must support LMP Ping.
3157 * Ensure for AES-CCM encryption as well.
3159 if (test_bit(HCI_CONN_ENCRYPT
, &conn
->flags
) &&
3160 test_bit(HCI_CONN_AES_CCM
, &conn
->flags
) &&
3161 ((conn
->type
== ACL_LINK
&& lmp_ping_capable(hdev
)) ||
3162 (conn
->type
== LE_LINK
&& (hdev
->le_features
[0] & HCI_LE_PING
)))) {
3163 struct hci_cp_write_auth_payload_to cp
;
3165 cp
.handle
= cpu_to_le16(conn
->handle
);
3166 cp
.timeout
= cpu_to_le16(hdev
->auth_payload_timeout
);
3167 hci_send_cmd(conn
->hdev
, HCI_OP_WRITE_AUTH_PAYLOAD_TO
,
3172 hci_encrypt_cfm(conn
, ev
->status
);
3175 hci_dev_unlock(hdev
);
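/*
 * Illustrative sketch (not part of the original file): the encryption change
 * handler above gates HCI_Read_Encryption_Key_Size on hdev->commands[20] &
 * 0x10. The Supported Commands table is a byte array where each command is
 * identified by an (octet, bit) pair; a small helper makes that intent
 * explicit. The helper name is hypothetical.
 */
static inline bool example_hci_command_supported(const struct hci_dev *hdev,
						 unsigned int octet,
						 unsigned int bit)
{
	return (hdev->commands[octet] & (1U << bit)) != 0;
}

/* Read Encryption Key Size lives at octet 20, bit 4 of Supported Commands:
 * example_hci_command_supported(hdev, 20, 4)
 */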
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3201 static void hci_remote_features_evt(struct hci_dev
*hdev
,
3202 struct sk_buff
*skb
)
3204 struct hci_ev_remote_features
*ev
= (void *) skb
->data
;
3205 struct hci_conn
*conn
;
3207 BT_DBG("%s status 0x%2.2x", hdev
->name
, ev
->status
);
3211 conn
= hci_conn_hash_lookup_handle(hdev
, __le16_to_cpu(ev
->handle
));
3216 memcpy(conn
->features
[0], ev
->features
, 8);
3218 if (conn
->state
!= BT_CONFIG
)
3221 if (!ev
->status
&& lmp_ext_feat_capable(hdev
) &&
3222 lmp_ext_feat_capable(conn
)) {
3223 struct hci_cp_read_remote_ext_features cp
;
3224 cp
.handle
= ev
->handle
;
3226 hci_send_cmd(hdev
, HCI_OP_READ_REMOTE_EXT_FEATURES
,
3231 if (!ev
->status
&& !test_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
)) {
3232 struct hci_cp_remote_name_req cp
;
3233 memset(&cp
, 0, sizeof(cp
));
3234 bacpy(&cp
.bdaddr
, &conn
->dst
);
3235 cp
.pscan_rep_mode
= 0x02;
3236 hci_send_cmd(hdev
, HCI_OP_REMOTE_NAME_REQ
, sizeof(cp
), &cp
);
3237 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
))
3238 mgmt_device_connected(hdev
, conn
, 0, NULL
, 0);
3240 if (!hci_outgoing_auth_needed(hdev
, conn
)) {
3241 conn
->state
= BT_CONNECTED
;
3242 hci_connect_cfm(conn
, ev
->status
);
3243 hci_conn_drop(conn
);
3247 hci_dev_unlock(hdev
);
3250 static void hci_cmd_complete_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
,
3251 u16
*opcode
, u8
*status
,
3252 hci_req_complete_t
*req_complete
,
3253 hci_req_complete_skb_t
*req_complete_skb
)
3255 struct hci_ev_cmd_complete
*ev
= (void *) skb
->data
;
3257 *opcode
= __le16_to_cpu(ev
->opcode
);
3258 *status
= skb
->data
[sizeof(*ev
)];
3260 skb_pull(skb
, sizeof(*ev
));
3263 case HCI_OP_INQUIRY_CANCEL
:
3264 hci_cc_inquiry_cancel(hdev
, skb
, status
);
3267 case HCI_OP_PERIODIC_INQ
:
3268 hci_cc_periodic_inq(hdev
, skb
);
3271 case HCI_OP_EXIT_PERIODIC_INQ
:
3272 hci_cc_exit_periodic_inq(hdev
, skb
);
3275 case HCI_OP_REMOTE_NAME_REQ_CANCEL
:
3276 hci_cc_remote_name_req_cancel(hdev
, skb
);
3279 case HCI_OP_ROLE_DISCOVERY
:
3280 hci_cc_role_discovery(hdev
, skb
);
3283 case HCI_OP_READ_LINK_POLICY
:
3284 hci_cc_read_link_policy(hdev
, skb
);
3287 case HCI_OP_WRITE_LINK_POLICY
:
3288 hci_cc_write_link_policy(hdev
, skb
);
3291 case HCI_OP_READ_DEF_LINK_POLICY
:
3292 hci_cc_read_def_link_policy(hdev
, skb
);
3295 case HCI_OP_WRITE_DEF_LINK_POLICY
:
3296 hci_cc_write_def_link_policy(hdev
, skb
);
3300 hci_cc_reset(hdev
, skb
);
3303 case HCI_OP_READ_STORED_LINK_KEY
:
3304 hci_cc_read_stored_link_key(hdev
, skb
);
3307 case HCI_OP_DELETE_STORED_LINK_KEY
:
3308 hci_cc_delete_stored_link_key(hdev
, skb
);
3311 case HCI_OP_WRITE_LOCAL_NAME
:
3312 hci_cc_write_local_name(hdev
, skb
);
3315 case HCI_OP_READ_LOCAL_NAME
:
3316 hci_cc_read_local_name(hdev
, skb
);
3319 case HCI_OP_WRITE_AUTH_ENABLE
:
3320 hci_cc_write_auth_enable(hdev
, skb
);
3323 case HCI_OP_WRITE_ENCRYPT_MODE
:
3324 hci_cc_write_encrypt_mode(hdev
, skb
);
3327 case HCI_OP_WRITE_SCAN_ENABLE
:
3328 hci_cc_write_scan_enable(hdev
, skb
);
3331 case HCI_OP_READ_CLASS_OF_DEV
:
3332 hci_cc_read_class_of_dev(hdev
, skb
);
3335 case HCI_OP_WRITE_CLASS_OF_DEV
:
3336 hci_cc_write_class_of_dev(hdev
, skb
);
3339 case HCI_OP_READ_VOICE_SETTING
:
3340 hci_cc_read_voice_setting(hdev
, skb
);
3343 case HCI_OP_WRITE_VOICE_SETTING
:
3344 hci_cc_write_voice_setting(hdev
, skb
);
3347 case HCI_OP_READ_NUM_SUPPORTED_IAC
:
3348 hci_cc_read_num_supported_iac(hdev
, skb
);
3351 case HCI_OP_WRITE_SSP_MODE
:
3352 hci_cc_write_ssp_mode(hdev
, skb
);
3355 case HCI_OP_WRITE_SC_SUPPORT
:
3356 hci_cc_write_sc_support(hdev
, skb
);
3359 case HCI_OP_READ_AUTH_PAYLOAD_TO
:
3360 hci_cc_read_auth_payload_timeout(hdev
, skb
);
3363 case HCI_OP_WRITE_AUTH_PAYLOAD_TO
:
3364 hci_cc_write_auth_payload_timeout(hdev
, skb
);
3367 case HCI_OP_READ_LOCAL_VERSION
:
3368 hci_cc_read_local_version(hdev
, skb
);
3371 case HCI_OP_READ_LOCAL_COMMANDS
:
3372 hci_cc_read_local_commands(hdev
, skb
);
3375 case HCI_OP_READ_LOCAL_FEATURES
:
3376 hci_cc_read_local_features(hdev
, skb
);
3379 case HCI_OP_READ_LOCAL_EXT_FEATURES
:
3380 hci_cc_read_local_ext_features(hdev
, skb
);
3383 case HCI_OP_READ_BUFFER_SIZE
:
3384 hci_cc_read_buffer_size(hdev
, skb
);
3387 case HCI_OP_READ_BD_ADDR
:
3388 hci_cc_read_bd_addr(hdev
, skb
);
3391 case HCI_OP_READ_LOCAL_PAIRING_OPTS
:
3392 hci_cc_read_local_pairing_opts(hdev
, skb
);
3395 case HCI_OP_READ_PAGE_SCAN_ACTIVITY
:
3396 hci_cc_read_page_scan_activity(hdev
, skb
);
3399 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
:
3400 hci_cc_write_page_scan_activity(hdev
, skb
);
3403 case HCI_OP_READ_PAGE_SCAN_TYPE
:
3404 hci_cc_read_page_scan_type(hdev
, skb
);
3407 case HCI_OP_WRITE_PAGE_SCAN_TYPE
:
3408 hci_cc_write_page_scan_type(hdev
, skb
);
3411 case HCI_OP_READ_DATA_BLOCK_SIZE
:
3412 hci_cc_read_data_block_size(hdev
, skb
);
3415 case HCI_OP_READ_FLOW_CONTROL_MODE
:
3416 hci_cc_read_flow_control_mode(hdev
, skb
);
3419 case HCI_OP_READ_LOCAL_AMP_INFO
:
3420 hci_cc_read_local_amp_info(hdev
, skb
);
3423 case HCI_OP_READ_CLOCK
:
3424 hci_cc_read_clock(hdev
, skb
);
3427 case HCI_OP_READ_INQ_RSP_TX_POWER
:
3428 hci_cc_read_inq_rsp_tx_power(hdev
, skb
);
3431 case HCI_OP_READ_DEF_ERR_DATA_REPORTING
:
3432 hci_cc_read_def_err_data_reporting(hdev
, skb
);
3435 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING
:
3436 hci_cc_write_def_err_data_reporting(hdev
, skb
);
3439 case HCI_OP_PIN_CODE_REPLY
:
3440 hci_cc_pin_code_reply(hdev
, skb
);
3443 case HCI_OP_PIN_CODE_NEG_REPLY
:
3444 hci_cc_pin_code_neg_reply(hdev
, skb
);
3447 case HCI_OP_READ_LOCAL_OOB_DATA
:
3448 hci_cc_read_local_oob_data(hdev
, skb
);
3451 case HCI_OP_READ_LOCAL_OOB_EXT_DATA
:
3452 hci_cc_read_local_oob_ext_data(hdev
, skb
);
3455 case HCI_OP_LE_READ_BUFFER_SIZE
:
3456 hci_cc_le_read_buffer_size(hdev
, skb
);
3459 case HCI_OP_LE_READ_LOCAL_FEATURES
:
3460 hci_cc_le_read_local_features(hdev
, skb
);
3463 case HCI_OP_LE_READ_ADV_TX_POWER
:
3464 hci_cc_le_read_adv_tx_power(hdev
, skb
);
3467 case HCI_OP_USER_CONFIRM_REPLY
:
3468 hci_cc_user_confirm_reply(hdev
, skb
);
3471 case HCI_OP_USER_CONFIRM_NEG_REPLY
:
3472 hci_cc_user_confirm_neg_reply(hdev
, skb
);
3475 case HCI_OP_USER_PASSKEY_REPLY
:
3476 hci_cc_user_passkey_reply(hdev
, skb
);
3479 case HCI_OP_USER_PASSKEY_NEG_REPLY
:
3480 hci_cc_user_passkey_neg_reply(hdev
, skb
);
3483 case HCI_OP_LE_SET_RANDOM_ADDR
:
3484 hci_cc_le_set_random_addr(hdev
, skb
);
3487 case HCI_OP_LE_SET_ADV_ENABLE
:
3488 hci_cc_le_set_adv_enable(hdev
, skb
);
3491 case HCI_OP_LE_SET_SCAN_PARAM
:
3492 hci_cc_le_set_scan_param(hdev
, skb
);
3495 case HCI_OP_LE_SET_SCAN_ENABLE
:
3496 hci_cc_le_set_scan_enable(hdev
, skb
);
3499 case HCI_OP_LE_READ_WHITE_LIST_SIZE
:
3500 hci_cc_le_read_white_list_size(hdev
, skb
);
3503 case HCI_OP_LE_CLEAR_WHITE_LIST
:
3504 hci_cc_le_clear_white_list(hdev
, skb
);
3507 case HCI_OP_LE_ADD_TO_WHITE_LIST
:
3508 hci_cc_le_add_to_white_list(hdev
, skb
);
3511 case HCI_OP_LE_DEL_FROM_WHITE_LIST
:
3512 hci_cc_le_del_from_white_list(hdev
, skb
);
3515 case HCI_OP_LE_READ_SUPPORTED_STATES
:
3516 hci_cc_le_read_supported_states(hdev
, skb
);
3519 case HCI_OP_LE_READ_DEF_DATA_LEN
:
3520 hci_cc_le_read_def_data_len(hdev
, skb
);
3523 case HCI_OP_LE_WRITE_DEF_DATA_LEN
:
3524 hci_cc_le_write_def_data_len(hdev
, skb
);
3527 case HCI_OP_LE_ADD_TO_RESOLV_LIST
:
3528 hci_cc_le_add_to_resolv_list(hdev
, skb
);
3531 case HCI_OP_LE_DEL_FROM_RESOLV_LIST
:
3532 hci_cc_le_del_from_resolv_list(hdev
, skb
);
3535 case HCI_OP_LE_CLEAR_RESOLV_LIST
:
3536 hci_cc_le_clear_resolv_list(hdev
, skb
);
3539 case HCI_OP_LE_READ_RESOLV_LIST_SIZE
:
3540 hci_cc_le_read_resolv_list_size(hdev
, skb
);
3543 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE
:
3544 hci_cc_le_set_addr_resolution_enable(hdev
, skb
);
3547 case HCI_OP_LE_READ_MAX_DATA_LEN
:
3548 hci_cc_le_read_max_data_len(hdev
, skb
);
3551 case HCI_OP_WRITE_LE_HOST_SUPPORTED
:
3552 hci_cc_write_le_host_supported(hdev
, skb
);
3555 case HCI_OP_LE_SET_ADV_PARAM
:
3556 hci_cc_set_adv_param(hdev
, skb
);
3559 case HCI_OP_READ_RSSI
:
3560 hci_cc_read_rssi(hdev
, skb
);
3563 case HCI_OP_READ_TX_POWER
:
3564 hci_cc_read_tx_power(hdev
, skb
);
3567 case HCI_OP_WRITE_SSP_DEBUG_MODE
:
3568 hci_cc_write_ssp_debug_mode(hdev
, skb
);
3571 case HCI_OP_LE_SET_EXT_SCAN_PARAMS
:
3572 hci_cc_le_set_ext_scan_param(hdev
, skb
);
3575 case HCI_OP_LE_SET_EXT_SCAN_ENABLE
:
3576 hci_cc_le_set_ext_scan_enable(hdev
, skb
);
3579 case HCI_OP_LE_SET_DEFAULT_PHY
:
3580 hci_cc_le_set_default_phy(hdev
, skb
);
3583 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS
:
3584 hci_cc_le_read_num_adv_sets(hdev
, skb
);
3587 case HCI_OP_LE_SET_EXT_ADV_PARAMS
:
3588 hci_cc_set_ext_adv_param(hdev
, skb
);
3591 case HCI_OP_LE_SET_EXT_ADV_ENABLE
:
3592 hci_cc_le_set_ext_adv_enable(hdev
, skb
);
3595 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR
:
3596 hci_cc_le_set_adv_set_random_addr(hdev
, skb
);
3599 case HCI_OP_LE_READ_TRANSMIT_POWER
:
3600 hci_cc_le_read_transmit_power(hdev
, skb
);
3604 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, *opcode
);
3608 if (*opcode
!= HCI_OP_NOP
)
3609 cancel_delayed_work(&hdev
->cmd_timer
);
3611 if (ev
->ncmd
&& !test_bit(HCI_RESET
, &hdev
->flags
))
3612 atomic_set(&hdev
->cmd_cnt
, 1);
3614 hci_req_cmd_complete(hdev
, *opcode
, *status
, req_complete
,
3617 if (hci_dev_test_flag(hdev
, HCI_CMD_PENDING
)) {
3619 "unexpected event for opcode 0x%4.4x", *opcode
);
3623 if (atomic_read(&hdev
->cmd_cnt
) && !skb_queue_empty(&hdev
->cmd_q
))
3624 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
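/*
 * Illustrative sketch (not part of the original file): the Command Complete
 * dispatch above is a flat switch on the opcode. The same mapping could be
 * expressed as a lookup table; this is only a sketch of that idea, with a
 * hypothetical table covering two of the opcodes handled above.
 */
struct example_cc_handler {
	u16 opcode;
	void (*func)(struct hci_dev *hdev, struct sk_buff *skb);
};

static const struct example_cc_handler example_cc_handlers[] = {
	{ HCI_OP_PERIODIC_INQ,      hci_cc_periodic_inq },
	{ HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq },
};

static inline void example_dispatch_cc(struct hci_dev *hdev, u16 opcode,
				       struct sk_buff *skb)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_cc_handlers); i++) {
		if (example_cc_handlers[i].opcode == opcode) {
			example_cc_handlers[i].func(hdev, skb);
			return;
		}
	}
}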
3627 static void hci_cmd_status_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
,
3628 u16
*opcode
, u8
*status
,
3629 hci_req_complete_t
*req_complete
,
3630 hci_req_complete_skb_t
*req_complete_skb
)
3632 struct hci_ev_cmd_status
*ev
= (void *) skb
->data
;
3634 skb_pull(skb
, sizeof(*ev
));
3636 *opcode
= __le16_to_cpu(ev
->opcode
);
3637 *status
= ev
->status
;
3640 case HCI_OP_INQUIRY
:
3641 hci_cs_inquiry(hdev
, ev
->status
);
3644 case HCI_OP_CREATE_CONN
:
3645 hci_cs_create_conn(hdev
, ev
->status
);
3648 case HCI_OP_DISCONNECT
:
3649 hci_cs_disconnect(hdev
, ev
->status
);
3652 case HCI_OP_ADD_SCO
:
3653 hci_cs_add_sco(hdev
, ev
->status
);
3656 case HCI_OP_AUTH_REQUESTED
:
3657 hci_cs_auth_requested(hdev
, ev
->status
);
3660 case HCI_OP_SET_CONN_ENCRYPT
:
3661 hci_cs_set_conn_encrypt(hdev
, ev
->status
);
3664 case HCI_OP_REMOTE_NAME_REQ
:
3665 hci_cs_remote_name_req(hdev
, ev
->status
);
3668 case HCI_OP_READ_REMOTE_FEATURES
:
3669 hci_cs_read_remote_features(hdev
, ev
->status
);
3672 case HCI_OP_READ_REMOTE_EXT_FEATURES
:
3673 hci_cs_read_remote_ext_features(hdev
, ev
->status
);
3676 case HCI_OP_SETUP_SYNC_CONN
:
3677 hci_cs_setup_sync_conn(hdev
, ev
->status
);
3680 case HCI_OP_SNIFF_MODE
:
3681 hci_cs_sniff_mode(hdev
, ev
->status
);
3684 case HCI_OP_EXIT_SNIFF_MODE
:
3685 hci_cs_exit_sniff_mode(hdev
, ev
->status
);
3688 case HCI_OP_SWITCH_ROLE
:
3689 hci_cs_switch_role(hdev
, ev
->status
);
3692 case HCI_OP_LE_CREATE_CONN
:
3693 hci_cs_le_create_conn(hdev
, ev
->status
);
3696 case HCI_OP_LE_READ_REMOTE_FEATURES
:
3697 hci_cs_le_read_remote_features(hdev
, ev
->status
);
3700 case HCI_OP_LE_START_ENC
:
3701 hci_cs_le_start_enc(hdev
, ev
->status
);
3704 case HCI_OP_LE_EXT_CREATE_CONN
:
3705 hci_cs_le_ext_create_conn(hdev
, ev
->status
);
3709 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, *opcode
);
3713 if (*opcode
!= HCI_OP_NOP
)
3714 cancel_delayed_work(&hdev
->cmd_timer
);
3716 if (ev
->ncmd
&& !test_bit(HCI_RESET
, &hdev
->flags
))
3717 atomic_set(&hdev
->cmd_cnt
, 1);
3719 /* Indicate request completion if the command failed. Also, if
3720 * we're not waiting for a special event and we get a success
3721 * command status we should try to flag the request as completed
3722 * (since for this kind of commands there will not be a command
3726 (hdev
->sent_cmd
&& !bt_cb(hdev
->sent_cmd
)->hci
.req_event
))
3727 hci_req_cmd_complete(hdev
, *opcode
, ev
->status
, req_complete
,
3730 if (hci_dev_test_flag(hdev
, HCI_CMD_PENDING
)) {
3732 "unexpected event for opcode 0x%4.4x", *opcode
);
3736 if (atomic_read(&hdev
->cmd_cnt
) && !skb_queue_empty(&hdev
->cmd_q
))
3737 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}
3771 static void hci_num_comp_pkts_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3773 struct hci_ev_num_comp_pkts
*ev
= (void *) skb
->data
;
3776 if (hdev
->flow_ctl_mode
!= HCI_FLOW_CTL_MODE_PACKET_BASED
) {
3777 bt_dev_err(hdev
, "wrong event for mode %d", hdev
->flow_ctl_mode
);
3781 if (skb
->len
< sizeof(*ev
) ||
3782 skb
->len
< struct_size(ev
, handles
, ev
->num_hndl
)) {
3783 BT_DBG("%s bad parameters", hdev
->name
);
3787 BT_DBG("%s num_hndl %d", hdev
->name
, ev
->num_hndl
);
3789 for (i
= 0; i
< ev
->num_hndl
; i
++) {
3790 struct hci_comp_pkts_info
*info
= &ev
->handles
[i
];
3791 struct hci_conn
*conn
;
3792 __u16 handle
, count
;
3794 handle
= __le16_to_cpu(info
->handle
);
3795 count
= __le16_to_cpu(info
->count
);
3797 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
3801 conn
->sent
-= count
;
3803 switch (conn
->type
) {
3805 hdev
->acl_cnt
+= count
;
3806 if (hdev
->acl_cnt
> hdev
->acl_pkts
)
3807 hdev
->acl_cnt
= hdev
->acl_pkts
;
3811 if (hdev
->le_pkts
) {
3812 hdev
->le_cnt
+= count
;
3813 if (hdev
->le_cnt
> hdev
->le_pkts
)
3814 hdev
->le_cnt
= hdev
->le_pkts
;
3816 hdev
->acl_cnt
+= count
;
3817 if (hdev
->acl_cnt
> hdev
->acl_pkts
)
3818 hdev
->acl_cnt
= hdev
->acl_pkts
;
3823 hdev
->sco_cnt
+= count
;
3824 if (hdev
->sco_cnt
> hdev
->sco_pkts
)
3825 hdev
->sco_cnt
= hdev
->sco_pkts
;
3829 bt_dev_err(hdev
, "unknown type %d conn %p",
3835 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
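/*
 * Illustrative sketch (not part of the original file): the Number Of
 * Completed Packets handler above returns buffer credits to the per-type
 * counters and clamps them to the totals the controller advertised. The
 * arithmetic in isolation, with hypothetical parameter names:
 */
static inline unsigned int example_return_credits(unsigned int cnt,
						  unsigned int completed,
						  unsigned int pool_size)
{
	cnt += completed;
	if (cnt > pool_size)	/* never exceed what the controller offers */
		cnt = pool_size;
	return cnt;
}

/* e.g. hdev->acl_cnt = example_return_credits(hdev->acl_cnt, count,
 *					       hdev->acl_pkts);
 */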
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		break;
	}

	return NULL;
}
3859 static void hci_num_comp_blocks_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3861 struct hci_ev_num_comp_blocks
*ev
= (void *) skb
->data
;
3864 if (hdev
->flow_ctl_mode
!= HCI_FLOW_CTL_MODE_BLOCK_BASED
) {
3865 bt_dev_err(hdev
, "wrong event for mode %d", hdev
->flow_ctl_mode
);
3869 if (skb
->len
< sizeof(*ev
) ||
3870 skb
->len
< struct_size(ev
, handles
, ev
->num_hndl
)) {
3871 BT_DBG("%s bad parameters", hdev
->name
);
3875 BT_DBG("%s num_blocks %d num_hndl %d", hdev
->name
, ev
->num_blocks
,
3878 for (i
= 0; i
< ev
->num_hndl
; i
++) {
3879 struct hci_comp_blocks_info
*info
= &ev
->handles
[i
];
3880 struct hci_conn
*conn
= NULL
;
3881 __u16 handle
, block_count
;
3883 handle
= __le16_to_cpu(info
->handle
);
3884 block_count
= __le16_to_cpu(info
->blocks
);
3886 conn
= __hci_conn_lookup_handle(hdev
, handle
);
3890 conn
->sent
-= block_count
;
3892 switch (conn
->type
) {
3895 hdev
->block_cnt
+= block_count
;
3896 if (hdev
->block_cnt
> hdev
->num_blocks
)
3897 hdev
->block_cnt
= hdev
->num_blocks
;
3901 bt_dev_err(hdev
, "unknown type %d conn %p",
3907 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3938 static void hci_pin_code_request_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3940 struct hci_ev_pin_code_req
*ev
= (void *) skb
->data
;
3941 struct hci_conn
*conn
;
3943 BT_DBG("%s", hdev
->name
);
3947 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &ev
->bdaddr
);
3951 if (conn
->state
== BT_CONNECTED
) {
3952 hci_conn_hold(conn
);
3953 conn
->disc_timeout
= HCI_PAIRING_TIMEOUT
;
3954 hci_conn_drop(conn
);
3957 if (!hci_dev_test_flag(hdev
, HCI_BONDABLE
) &&
3958 !test_bit(HCI_CONN_AUTH_INITIATOR
, &conn
->flags
)) {
3959 hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
3960 sizeof(ev
->bdaddr
), &ev
->bdaddr
);
3961 } else if (hci_dev_test_flag(hdev
, HCI_MGMT
)) {
3964 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
)
3969 mgmt_pin_code_request(hdev
, &ev
->bdaddr
, secure
);
3973 hci_dev_unlock(hdev
);
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}
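/*
 * Illustrative sketch (not part of the original file): conn_set_key() maps
 * the stored link key type onto the security level the connection can claim.
 * A compact restatement of that mapping (hypothetical helper, ignoring the
 * PIN-length special case for legacy combination keys):
 */
static inline u8 example_sec_level_for_key(u8 key_type)
{
	switch (key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		return BT_SECURITY_FIPS;	/* authenticated, P-256 */
	case HCI_LK_AUTH_COMBINATION_P192:
		return BT_SECURITY_HIGH;	/* authenticated, P-192 */
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		return BT_SECURITY_MEDIUM;	/* unauthenticated SSP keys */
	default:
		return BT_SECURITY_LOW;
	}
}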
4008 static void hci_link_key_request_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4010 struct hci_ev_link_key_req
*ev
= (void *) skb
->data
;
4011 struct hci_cp_link_key_reply cp
;
4012 struct hci_conn
*conn
;
4013 struct link_key
*key
;
4015 BT_DBG("%s", hdev
->name
);
4017 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
4022 key
= hci_find_link_key(hdev
, &ev
->bdaddr
);
4024 BT_DBG("%s link key not found for %pMR", hdev
->name
,
4029 BT_DBG("%s found key type %u for %pMR", hdev
->name
, key
->type
,
4032 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &ev
->bdaddr
);
4034 clear_bit(HCI_CONN_NEW_LINK_KEY
, &conn
->flags
);
4036 if ((key
->type
== HCI_LK_UNAUTH_COMBINATION_P192
||
4037 key
->type
== HCI_LK_UNAUTH_COMBINATION_P256
) &&
4038 conn
->auth_type
!= 0xff && (conn
->auth_type
& 0x01)) {
4039 BT_DBG("%s ignoring unauthenticated key", hdev
->name
);
4043 if (key
->type
== HCI_LK_COMBINATION
&& key
->pin_len
< 16 &&
4044 (conn
->pending_sec_level
== BT_SECURITY_HIGH
||
4045 conn
->pending_sec_level
== BT_SECURITY_FIPS
)) {
4046 BT_DBG("%s ignoring key unauthenticated for high security",
4051 conn_set_key(conn
, key
->type
, key
->pin_len
);
4054 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
4055 memcpy(cp
.link_key
, key
->val
, HCI_LINK_KEY_SIZE
);
4057 hci_send_cmd(hdev
, HCI_OP_LINK_KEY_REPLY
, sizeof(cp
), &cp
);
4059 hci_dev_unlock(hdev
);
4064 hci_send_cmd(hdev
, HCI_OP_LINK_KEY_NEG_REPLY
, 6, &ev
->bdaddr
);
4065 hci_dev_unlock(hdev
);
4068 static void hci_link_key_notify_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4070 struct hci_ev_link_key_notify
*ev
= (void *) skb
->data
;
4071 struct hci_conn
*conn
;
4072 struct link_key
*key
;
4076 BT_DBG("%s", hdev
->name
);
4080 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &ev
->bdaddr
);
4084 hci_conn_hold(conn
);
4085 conn
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
4086 hci_conn_drop(conn
);
4088 set_bit(HCI_CONN_NEW_LINK_KEY
, &conn
->flags
);
4089 conn_set_key(conn
, ev
->key_type
, conn
->pin_length
);
4091 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
4094 key
= hci_add_link_key(hdev
, conn
, &ev
->bdaddr
, ev
->link_key
,
4095 ev
->key_type
, pin_len
, &persistent
);
4099 /* Update connection information since adding the key will have
4100 * fixed up the type in the case of changed combination keys.
4102 if (ev
->key_type
== HCI_LK_CHANGED_COMBINATION
)
4103 conn_set_key(conn
, key
->type
, key
->pin_len
);
4105 mgmt_new_link_key(hdev
, key
, persistent
);
4107 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4108 * is set. If it's not set simply remove the key from the kernel
4109 * list (we've still notified user space about it but with
4110 * store_hint being 0).
4112 if (key
->type
== HCI_LK_DEBUG_COMBINATION
&&
4113 !hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
)) {
4114 list_del_rcu(&key
->list
);
4115 kfree_rcu(key
, rcu
);
4120 clear_bit(HCI_CONN_FLUSH_KEY
, &conn
->flags
);
4122 set_bit(HCI_CONN_FLUSH_KEY
, &conn
->flags
);
4125 hci_dev_unlock(hdev
);
static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}
static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}
static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}
4185 static void hci_inquiry_result_with_rssi_evt(struct hci_dev
*hdev
,
4186 struct sk_buff
*skb
)
4188 struct inquiry_data data
;
4189 int num_rsp
= *((__u8
*) skb
->data
);
4191 BT_DBG("%s num_rsp %d", hdev
->name
, num_rsp
);
4196 if (hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
))
4201 if ((skb
->len
- 1) / num_rsp
!= sizeof(struct inquiry_info_with_rssi
)) {
4202 struct inquiry_info_with_rssi_and_pscan_mode
*info
;
4203 info
= (void *) (skb
->data
+ 1);
4205 if (skb
->len
< num_rsp
* sizeof(*info
) + 1)
4208 for (; num_rsp
; num_rsp
--, info
++) {
4211 bacpy(&data
.bdaddr
, &info
->bdaddr
);
4212 data
.pscan_rep_mode
= info
->pscan_rep_mode
;
4213 data
.pscan_period_mode
= info
->pscan_period_mode
;
4214 data
.pscan_mode
= info
->pscan_mode
;
4215 memcpy(data
.dev_class
, info
->dev_class
, 3);
4216 data
.clock_offset
= info
->clock_offset
;
4217 data
.rssi
= info
->rssi
;
4218 data
.ssp_mode
= 0x00;
4220 flags
= hci_inquiry_cache_update(hdev
, &data
, false);
4222 mgmt_device_found(hdev
, &info
->bdaddr
, ACL_LINK
, 0x00,
4223 info
->dev_class
, info
->rssi
,
4224 flags
, NULL
, 0, NULL
, 0);
4227 struct inquiry_info_with_rssi
*info
= (void *) (skb
->data
+ 1);
4229 if (skb
->len
< num_rsp
* sizeof(*info
) + 1)
4232 for (; num_rsp
; num_rsp
--, info
++) {
4235 bacpy(&data
.bdaddr
, &info
->bdaddr
);
4236 data
.pscan_rep_mode
= info
->pscan_rep_mode
;
4237 data
.pscan_period_mode
= info
->pscan_period_mode
;
4238 data
.pscan_mode
= 0x00;
4239 memcpy(data
.dev_class
, info
->dev_class
, 3);
4240 data
.clock_offset
= info
->clock_offset
;
4241 data
.rssi
= info
->rssi
;
4242 data
.ssp_mode
= 0x00;
4244 flags
= hci_inquiry_cache_update(hdev
, &data
, false);
4246 mgmt_device_found(hdev
, &info
->bdaddr
, ACL_LINK
, 0x00,
4247 info
->dev_class
, info
->rssi
,
4248 flags
, NULL
, 0, NULL
, 0);
4253 hci_dev_unlock(hdev
);
4256 static void hci_remote_ext_features_evt(struct hci_dev
*hdev
,
4257 struct sk_buff
*skb
)
4259 struct hci_ev_remote_ext_features
*ev
= (void *) skb
->data
;
4260 struct hci_conn
*conn
;
4262 BT_DBG("%s", hdev
->name
);
4266 conn
= hci_conn_hash_lookup_handle(hdev
, __le16_to_cpu(ev
->handle
));
4270 if (ev
->page
< HCI_MAX_PAGES
)
4271 memcpy(conn
->features
[ev
->page
], ev
->features
, 8);
4273 if (!ev
->status
&& ev
->page
== 0x01) {
4274 struct inquiry_entry
*ie
;
4276 ie
= hci_inquiry_cache_lookup(hdev
, &conn
->dst
);
4278 ie
->data
.ssp_mode
= (ev
->features
[0] & LMP_HOST_SSP
);
4280 if (ev
->features
[0] & LMP_HOST_SSP
) {
4281 set_bit(HCI_CONN_SSP_ENABLED
, &conn
->flags
);
4283 /* It is mandatory by the Bluetooth specification that
4284 * Extended Inquiry Results are only used when Secure
4285 * Simple Pairing is enabled, but some devices violate
4288 * To make these devices work, the internal SSP
4289 * enabled flag needs to be cleared if the remote host
4290 * features do not indicate SSP support */
4291 clear_bit(HCI_CONN_SSP_ENABLED
, &conn
->flags
);
4294 if (ev
->features
[0] & LMP_HOST_SC
)
4295 set_bit(HCI_CONN_SC_ENABLED
, &conn
->flags
);
4298 if (conn
->state
!= BT_CONFIG
)
4301 if (!ev
->status
&& !test_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
)) {
4302 struct hci_cp_remote_name_req cp
;
4303 memset(&cp
, 0, sizeof(cp
));
4304 bacpy(&cp
.bdaddr
, &conn
->dst
);
4305 cp
.pscan_rep_mode
= 0x02;
4306 hci_send_cmd(hdev
, HCI_OP_REMOTE_NAME_REQ
, sizeof(cp
), &cp
);
4307 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &conn
->flags
))
4308 mgmt_device_connected(hdev
, conn
, 0, NULL
, 0);
4310 if (!hci_outgoing_auth_needed(hdev
, conn
)) {
4311 conn
->state
= BT_CONNECTED
;
4312 hci_connect_cfm(conn
, ev
->status
);
4313 hci_conn_drop(conn
);
4317 hci_dev_unlock(hdev
);
4320 static void hci_sync_conn_complete_evt(struct hci_dev
*hdev
,
4321 struct sk_buff
*skb
)
4323 struct hci_ev_sync_conn_complete
*ev
= (void *) skb
->data
;
4324 struct hci_conn
*conn
;
4326 BT_DBG("%s status 0x%2.2x", hdev
->name
, ev
->status
);
4330 conn
= hci_conn_hash_lookup_ba(hdev
, ev
->link_type
, &ev
->bdaddr
);
4332 if (ev
->link_type
== ESCO_LINK
)
4335 /* When the link type in the event indicates SCO connection
4336 * and lookup of the connection object fails, then check
4337 * if an eSCO connection object exists.
4339 * The core limits the synchronous connections to either
4340 * SCO or eSCO. The eSCO connection is preferred and tried
4341 * to be setup first and until successfully established,
4342 * the link type will be hinted as eSCO.
4344 conn
= hci_conn_hash_lookup_ba(hdev
, ESCO_LINK
, &ev
->bdaddr
);
4349 switch (ev
->status
) {
4351 conn
->handle
= __le16_to_cpu(ev
->handle
);
4352 conn
->state
= BT_CONNECTED
;
4353 conn
->type
= ev
->link_type
;
4355 hci_debugfs_create_conn(conn
);
4356 hci_conn_add_sysfs(conn
);
4359 case 0x10: /* Connection Accept Timeout */
4360 case 0x0d: /* Connection Rejected due to Limited Resources */
4361 case 0x11: /* Unsupported Feature or Parameter Value */
4362 case 0x1c: /* SCO interval rejected */
4363 case 0x1a: /* Unsupported Remote Feature */
4364 case 0x1e: /* Invalid LMP Parameters */
4365 case 0x1f: /* Unspecified error */
4366 case 0x20: /* Unsupported LMP Parameter value */
4368 conn
->pkt_type
= (hdev
->esco_type
& SCO_ESCO_MASK
) |
4369 (hdev
->esco_type
& EDR_ESCO_MASK
);
4370 if (hci_setup_sync(conn
, conn
->link
->handle
))
4376 conn
->state
= BT_CLOSED
;
4380 bt_dev_dbg(hdev
, "SCO connected with air mode: %02x", ev
->air_mode
);
4382 switch (conn
->setting
& SCO_AIRMODE_MASK
) {
4383 case SCO_AIRMODE_CVSD
:
4385 hdev
->notify(hdev
, HCI_NOTIFY_ENABLE_SCO_CVSD
);
4387 case SCO_AIRMODE_TRANSP
:
4389 hdev
->notify(hdev
, HCI_NOTIFY_ENABLE_SCO_TRANSP
);
4393 hci_connect_cfm(conn
, ev
->status
);
4398 hci_dev_unlock(hdev
);
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}
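/*
 * Illustrative sketch (not part of the original file): EIR data is a
 * sequence of { length, type, data[length - 1] } fields, terminated by a
 * zero length octet. A hypothetical walker that visits every field shows
 * the framing that eir_get_length() relies on.
 */
static inline void example_eir_for_each(const u8 *eir, size_t eir_len)
{
	size_t offset = 0;

	while (offset < eir_len) {
		u8 field_len = eir[offset];
		const u8 *field_data;
		u8 field_type;

		if (field_len == 0 || offset + field_len >= eir_len)
			break;		/* terminator or truncated field */

		field_type = eir[offset + 1];
		field_data = &eir[offset + 2];
		(void)field_type;
		(void)field_data;	/* a real caller would inspect these */

		offset += field_len + 1;
	}
}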
4418 static void hci_extended_inquiry_result_evt(struct hci_dev
*hdev
,
4419 struct sk_buff
*skb
)
4421 struct inquiry_data data
;
4422 struct extended_inquiry_info
*info
= (void *) (skb
->data
+ 1);
4423 int num_rsp
= *((__u8
*) skb
->data
);
4426 BT_DBG("%s num_rsp %d", hdev
->name
, num_rsp
);
4428 if (!num_rsp
|| skb
->len
< num_rsp
* sizeof(*info
) + 1)
4431 if (hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
))
4436 for (; num_rsp
; num_rsp
--, info
++) {
4440 bacpy(&data
.bdaddr
, &info
->bdaddr
);
4441 data
.pscan_rep_mode
= info
->pscan_rep_mode
;
4442 data
.pscan_period_mode
= info
->pscan_period_mode
;
4443 data
.pscan_mode
= 0x00;
4444 memcpy(data
.dev_class
, info
->dev_class
, 3);
4445 data
.clock_offset
= info
->clock_offset
;
4446 data
.rssi
= info
->rssi
;
4447 data
.ssp_mode
= 0x01;
4449 if (hci_dev_test_flag(hdev
, HCI_MGMT
))
4450 name_known
= eir_get_data(info
->data
,
4452 EIR_NAME_COMPLETE
, NULL
);
4456 flags
= hci_inquiry_cache_update(hdev
, &data
, name_known
);
4458 eir_len
= eir_get_length(info
->data
, sizeof(info
->data
));
4460 mgmt_device_found(hdev
, &info
->bdaddr
, ACL_LINK
, 0x00,
4461 info
->dev_class
, info
->rssi
,
4462 flags
, info
->data
, eir_len
, NULL
, 0);
4465 hci_dev_unlock(hdev
);
4468 static void hci_key_refresh_complete_evt(struct hci_dev
*hdev
,
4469 struct sk_buff
*skb
)
4471 struct hci_ev_key_refresh_complete
*ev
= (void *) skb
->data
;
4472 struct hci_conn
*conn
;
4474 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev
->name
, ev
->status
,
4475 __le16_to_cpu(ev
->handle
));
4479 conn
= hci_conn_hash_lookup_handle(hdev
, __le16_to_cpu(ev
->handle
));
4483 /* For BR/EDR the necessary steps are taken through the
4484 * auth_complete event.
4486 if (conn
->type
!= LE_LINK
)
4490 conn
->sec_level
= conn
->pending_sec_level
;
4492 clear_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
);
4494 if (ev
->status
&& conn
->state
== BT_CONNECTED
) {
4495 hci_disconnect(conn
, HCI_ERROR_AUTH_FAILURE
);
4496 hci_conn_drop(conn
);
4500 if (conn
->state
== BT_CONFIG
) {
4502 conn
->state
= BT_CONNECTED
;
4504 hci_connect_cfm(conn
, ev
->status
);
4505 hci_conn_drop(conn
);
4507 hci_auth_cfm(conn
, ev
->status
);
4509 hci_conn_hold(conn
);
4510 conn
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
4511 hci_conn_drop(conn
);
4515 hci_dev_unlock(hdev
);
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}
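/*
 * Illustrative sketch (not part of the original file): in the authentication
 * requirement values used above, bit 0 is the MITM-protection flag and the
 * upper bits select the bonding type, which is why the code masks with 0x01
 * and ~0x01. Two hypothetical helpers spell that out.
 */
static inline bool example_auth_req_mitm(u8 auth_req)
{
	return auth_req & 0x01;		/* MITM protection requested */
}

static inline u8 example_auth_req_bonding(u8 auth_req)
{
	return auth_req & ~0x01;	/* bonding type without the MITM bit */
}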
4536 static u8
bredr_oob_data_present(struct hci_conn
*conn
)
4538 struct hci_dev
*hdev
= conn
->hdev
;
4539 struct oob_data
*data
;
4541 data
= hci_find_remote_oob_data(hdev
, &conn
->dst
, BDADDR_BREDR
);
4545 if (bredr_sc_enabled(hdev
)) {
4546 /* When Secure Connections is enabled, then just
4547 * return the present value stored with the OOB
4548 * data. The stored value contains the right present
4549 * information. However it can only be trusted when
4550 * not in Secure Connection Only mode.
4552 if (!hci_dev_test_flag(hdev
, HCI_SC_ONLY
))
4553 return data
->present
;
4555 /* When Secure Connections Only mode is enabled, then
4556 * the P-256 values are required. If they are not
4557 * available, then do not declare that OOB data is
4560 if (!memcmp(data
->rand256
, ZERO_KEY
, 16) ||
4561 !memcmp(data
->hash256
, ZERO_KEY
, 16))
4567 /* When Secure Connections is not enabled or actually
4568 * not supported by the hardware, then check that if
4569 * P-192 data values are present.
4571 if (!memcmp(data
->rand192
, ZERO_KEY
, 16) ||
4572 !memcmp(data
->hash192
, ZERO_KEY
, 16))
4578 static void hci_io_capa_request_evt(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4580 struct hci_ev_io_capa_request
*ev
= (void *) skb
->data
;
4581 struct hci_conn
*conn
;
4583 BT_DBG("%s", hdev
->name
);
4587 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &ev
->bdaddr
);
4591 hci_conn_hold(conn
);
4593 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
4596 /* Allow pairing if we're pairable, the initiators of the
4597 * pairing or if the remote is not requesting bonding.
4599 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
) ||
4600 test_bit(HCI_CONN_AUTH_INITIATOR
, &conn
->flags
) ||
4601 (conn
->remote_auth
& ~0x01) == HCI_AT_NO_BONDING
) {
4602 struct hci_cp_io_capability_reply cp
;
4604 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
4605 /* Change the IO capability from KeyboardDisplay
4606 * to DisplayYesNo as it is not supported by BT spec. */
4607 cp
.capability
= (conn
->io_capability
== 0x04) ?
4608 HCI_IO_DISPLAY_YESNO
: conn
->io_capability
;
4610 /* If we are initiators, there is no remote information yet */
4611 if (conn
->remote_auth
== 0xff) {
4612 /* Request MITM protection if our IO caps allow it
4613 * except for the no-bonding case.
4615 if (conn
->io_capability
!= HCI_IO_NO_INPUT_OUTPUT
&&
4616 conn
->auth_type
!= HCI_AT_NO_BONDING
)
4617 conn
->auth_type
|= 0x01;
4619 conn
->auth_type
= hci_get_auth_req(conn
);
4622 /* If we're not bondable, force one of the non-bondable
4623 * authentication requirement values.
4625 if (!hci_dev_test_flag(hdev
, HCI_BONDABLE
))
4626 conn
->auth_type
&= HCI_AT_NO_BONDING_MITM
;
4628 cp
.authentication
= conn
->auth_type
;
4629 cp
.oob_data
= bredr_oob_data_present(conn
);
4631 hci_send_cmd(hdev
, HCI_OP_IO_CAPABILITY_REPLY
,
4634 struct hci_cp_io_capability_neg_reply cp
;
4636 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
4637 cp
.reason
= HCI_ERROR_PAIRING_NOT_ALLOWED
;
4639 hci_send_cmd(hdev
, HCI_OP_IO_CAPABILITY_NEG_REPLY
,
4644 hci_dev_unlock(hdev
);
static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}
4667 static void hci_user_confirm_request_evt(struct hci_dev
*hdev
,
4668 struct sk_buff
*skb
)
4670 struct hci_ev_user_confirm_req
*ev
= (void *) skb
->data
;
4671 int loc_mitm
, rem_mitm
, confirm_hint
= 0;
4672 struct hci_conn
*conn
;
4674 BT_DBG("%s", hdev
->name
);
4678 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
4681 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &ev
->bdaddr
);
4685 loc_mitm
= (conn
->auth_type
& 0x01);
4686 rem_mitm
= (conn
->remote_auth
& 0x01);
4688 /* If we require MITM but the remote device can't provide that
4689 * (it has NoInputNoOutput) then reject the confirmation
4690 * request. We check the security level here since it doesn't
4691 * necessarily match conn->auth_type.
4693 if (conn
->pending_sec_level
> BT_SECURITY_MEDIUM
&&
4694 conn
->remote_cap
== HCI_IO_NO_INPUT_OUTPUT
) {
4695 BT_DBG("Rejecting request: remote device can't provide MITM");
4696 hci_send_cmd(hdev
, HCI_OP_USER_CONFIRM_NEG_REPLY
,
4697 sizeof(ev
->bdaddr
), &ev
->bdaddr
);
4701 /* If no side requires MITM protection; auto-accept */
4702 if ((!loc_mitm
|| conn
->remote_cap
== HCI_IO_NO_INPUT_OUTPUT
) &&
4703 (!rem_mitm
|| conn
->io_capability
== HCI_IO_NO_INPUT_OUTPUT
)) {
4705 /* If we're not the initiators request authorization to
4706 * proceed from user space (mgmt_user_confirm with
4707 * confirm_hint set to 1). The exception is if neither
4708 * side had MITM or if the local IO capability is
4709 * NoInputNoOutput, in which case we do auto-accept
4711 if (!test_bit(HCI_CONN_AUTH_PEND
, &conn
->flags
) &&
4712 conn
->io_capability
!= HCI_IO_NO_INPUT_OUTPUT
&&
4713 (loc_mitm
|| rem_mitm
)) {
4714 BT_DBG("Confirming auto-accept as acceptor");
4719 /* If there already exists link key in local host, leave the
4720 * decision to user space since the remote device could be
4721 * legitimate or malicious.
4723 if (hci_find_link_key(hdev
, &ev
->bdaddr
)) {
4724 bt_dev_dbg(hdev
, "Local host already has link key");
4729 BT_DBG("Auto-accept of user confirmation with %ums delay",
4730 hdev
->auto_accept_delay
);
4732 if (hdev
->auto_accept_delay
> 0) {
4733 int delay
= msecs_to_jiffies(hdev
->auto_accept_delay
);
4734 queue_delayed_work(conn
->hdev
->workqueue
,
4735 &conn
->auto_accept_work
, delay
);
4739 hci_send_cmd(hdev
, HCI_OP_USER_CONFIRM_REPLY
,
4740 sizeof(ev
->bdaddr
), &ev
->bdaddr
);
4745 mgmt_user_confirm_request(hdev
, &ev
->bdaddr
, ACL_LINK
, 0,
4746 le32_to_cpu(ev
->passkey
), confirm_hint
);
4749 hci_dev_unlock(hdev
);
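/*
 * Illustrative sketch (not part of the original file): the user confirmation
 * handler above auto-accepts only when MITM protection is either not
 * required or not achievable with the peer's IO capabilities. The core of
 * that decision, with hypothetical parameter names:
 */
static inline bool example_can_auto_accept(bool loc_mitm, bool rem_mitm,
					   bool loc_no_io, bool rem_no_io)
{
	/* If MITM is required and achievable, defer to user confirmation */
	if (loc_mitm && !rem_no_io)
		return false;
	if (rem_mitm && !loc_no_io)
		return false;
	return true;
}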
static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}
static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
4875 static void hci_remote_oob_data_request_evt(struct hci_dev
*hdev
,
4876 struct sk_buff
*skb
)
4878 struct hci_ev_remote_oob_data_request
*ev
= (void *) skb
->data
;
4879 struct oob_data
*data
;
4881 BT_DBG("%s", hdev
->name
);
4885 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
4888 data
= hci_find_remote_oob_data(hdev
, &ev
->bdaddr
, BDADDR_BREDR
);
4890 struct hci_cp_remote_oob_data_neg_reply cp
;
4892 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
4893 hci_send_cmd(hdev
, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY
,
4898 if (bredr_sc_enabled(hdev
)) {
4899 struct hci_cp_remote_oob_ext_data_reply cp
;
4901 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
4902 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
4903 memset(cp
.hash192
, 0, sizeof(cp
.hash192
));
4904 memset(cp
.rand192
, 0, sizeof(cp
.rand192
));
4906 memcpy(cp
.hash192
, data
->hash192
, sizeof(cp
.hash192
));
4907 memcpy(cp
.rand192
, data
->rand192
, sizeof(cp
.rand192
));
4909 memcpy(cp
.hash256
, data
->hash256
, sizeof(cp
.hash256
));
4910 memcpy(cp
.rand256
, data
->rand256
, sizeof(cp
.rand256
));
4912 hci_send_cmd(hdev
, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY
,
4915 struct hci_cp_remote_oob_data_reply cp
;
4917 bacpy(&cp
.bdaddr
, &ev
->bdaddr
);
4918 memcpy(cp
.hash
, data
->hash192
, sizeof(cp
.hash
));
4919 memcpy(cp
.rand
, data
->rand192
, sizeof(cp
.rand
));
4921 hci_send_cmd(hdev
, HCI_OP_REMOTE_OOB_DATA_REPLY
,
4926 hci_dev_unlock(hdev
);
#if IS_ENABLED(CONFIG_BT_HS)
static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *)skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
#endif
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
				 u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
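
/* Note (added for clarity): le_conn_complete_evt() is the common back end for
 * LE connection establishment. Both the legacy LE Connection Complete
 * subevent and the LE Enhanced Connection Complete subevent, handled by the
 * two wrappers below, decode their parameters and funnel into this helper, so
 * role, handle and connection parameters are processed in a single place.
 */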
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}
static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		hci_req_disable_address_resolution(hdev);
}
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
			return;

		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}
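
/* Note (added for clarity): with extended advertising the controller may use
 * a different own random address per advertising set, so the responder
 * address of an incoming connection is only filled in here, once the
 * Advertising Set Terminated event tells us which set produced the
 * connection. This complements the "resp_addr will be updated in Adv
 * Terminated event" remark in le_conn_complete_evt() above.
 */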
static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
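
/* Note (added for clarity): check_pending_le_conn() is where the autoconnect
 * policy is applied. HCI_AUTO_CONN_DIRECT only reacts to ADV_DIRECT_IND,
 * i.e. the peer explicitly addressing us, while HCI_AUTO_CONN_ALWAYS also
 * initiates on ADV_IND. Non-connectable advertising never triggers a
 * connection attempt, and blocked devices are filtered out before any policy
 * check is made.
 */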
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
				       len, real_len);
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
				     direct_addr);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
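
/* Note (added for clarity): the length-correction loop above walks the
 * advertising payload as a sequence of AD structures, each laid out as
 *
 *	<length> <type> <data ...>
 *
 * where <length> counts the type byte plus the data. For example the bytes
 * "02 01 06 03 03 aa fe" are a Flags structure followed by a 16-bit Service
 * UUID list, and a zero length byte marks zero padding at the end of the
 * report.
 */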
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		if (ev->length <= HCI_MAX_AD_LENGTH) {
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}
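
/* Note (added for clarity): the extended advertising report carries a bit
 * field rather than a single PDU type. Roughly, one bit each marks the event
 * as connectable, scannable, directed, a scan response, or a legacy PDU,
 * which is why the translation above first checks LE_EXT_ADV_LEGACY_PDU and
 * then derives the closest legacy advertising type from the remaining bits.
 */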
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;
		u8 legacy_evt_type;
		u16 evt_type;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}

		ptr += sizeof(*ev) + ev->length;
	}

	hci_dev_unlock(hdev);
}
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
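
/* Note (added for clarity): hci_check_conn_params() enforces the ranges from
 * the Core Specification, roughly a connection interval of 7.5 ms to 4 s, a
 * slave latency of at most 499 events, and a supervision timeout between
 * 100 ms and 32 s that is also long enough to cover the chosen interval and
 * latency. Requests outside those bounds are rejected above with
 * HCI_ERROR_INVALID_LL_PARAMS.
 */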
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
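
/* Note (added for clarity): the wake_reason and wake_addr recorded above are
 * not acted on here; they are kept so that, after the system resumes, the
 * management interface can report which remote device (or which unexpected
 * event) woke the host, as described in the BlueZ mgmt API documentation
 * referenced in the comment above.
 */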
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}