/*
 * (web-scrape residue, preserved for provenance)
 * ixgbevf: remove ndo_poll_controller
 * [linux/fpc-iii.git] / net / bluetooth / hci_event.c
 * blob f12555f23a49a025563a6f11f174a0be71556a75
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
50 if (status)
51 return;
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
57 hci_dev_lock(hdev);
58 /* Set discovery state to stopped if we're not doing LE active
59 * scanning.
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
64 hci_dev_unlock(hdev);
66 hci_conn_check_pending(hdev);
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 if (status)
76 return;
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 __u8 status = *((__u8 *) skb->data);
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
87 if (status)
88 return;
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 hci_conn_check_pending(hdev);
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
96 struct sk_buff *skb)
98 BT_DBG("%s", hdev->name);
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
108 if (rp->status)
109 return;
111 hci_dev_lock(hdev);
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
114 if (conn)
115 conn->role = rp->role;
117 hci_dev_unlock(hdev);
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
127 if (rp->status)
128 return;
130 hci_dev_lock(hdev);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 if (conn)
134 conn->link_policy = __le16_to_cpu(rp->policy);
136 hci_dev_unlock(hdev);
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
143 void *sent;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 if (rp->status)
148 return;
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 if (!sent)
152 return;
154 hci_dev_lock(hdev);
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
157 if (conn)
158 conn->link_policy = get_unaligned_le16(sent + 2);
160 hci_dev_unlock(hdev);
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
164 struct sk_buff *skb)
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
170 if (rp->status)
171 return;
173 hdev->link_policy = __le16_to_cpu(rp->policy);
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
177 struct sk_buff *skb)
179 __u8 status = *((__u8 *) skb->data);
180 void *sent;
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
184 if (status)
185 return;
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
188 if (!sent)
189 return;
191 hdev->link_policy = get_unaligned_le16(sent);
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 __u8 status = *((__u8 *) skb->data);
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
200 clear_bit(HCI_RESET, &hdev->flags);
202 if (status)
203 return;
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
221 hdev->ssp_debug_mode = 0;
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 struct sk_buff *skb)
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 if (!sent)
237 return;
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 struct sk_buff *skb)
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
252 if (rp->status)
253 return;
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
257 else
258 hdev->stored_num_keys = 0;
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
264 void *sent;
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
269 if (!sent)
270 return;
272 hci_dev_lock(hdev);
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
276 else if (!status)
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
279 hci_dev_unlock(hdev);
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
288 if (rp->status)
289 return;
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
299 void *sent;
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 if (!sent)
305 return;
307 hci_dev_lock(hdev);
309 if (!status) {
310 __u8 param = *((__u8 *) sent);
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
314 else
315 clear_bit(HCI_AUTH, &hdev->flags);
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
321 hci_dev_unlock(hdev);
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 __u8 status = *((__u8 *) skb->data);
327 __u8 param;
328 void *sent;
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
332 if (status)
333 return;
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
336 if (!sent)
337 return;
339 param = *((__u8 *) sent);
341 if (param)
342 set_bit(HCI_ENCRYPT, &hdev->flags);
343 else
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
349 __u8 status = *((__u8 *) skb->data);
350 __u8 param;
351 void *sent;
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
356 if (!sent)
357 return;
359 param = *((__u8 *) sent);
361 hci_dev_lock(hdev);
363 if (status) {
364 hdev->discov_timeout = 0;
365 goto done;
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
370 else
371 clear_bit(HCI_ISCAN, &hdev->flags);
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
375 else
376 clear_bit(HCI_PSCAN, &hdev->flags);
378 done:
379 hci_dev_unlock(hdev);
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
388 if (rp->status)
389 return;
391 memcpy(hdev->dev_class, rp->dev_class, 3);
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
400 void *sent;
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 if (!sent)
406 return;
408 hci_dev_lock(hdev);
410 if (status == 0)
411 memcpy(hdev->dev_class, sent, 3);
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
416 hci_dev_unlock(hdev);
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
422 __u16 setting;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426 if (rp->status)
427 return;
429 setting = __le16_to_cpu(rp->voice_setting);
431 if (hdev->voice_setting == setting)
432 return;
434 hdev->voice_setting = setting;
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
438 if (hdev->notify)
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
443 struct sk_buff *skb)
445 __u8 status = *((__u8 *) skb->data);
446 __u16 setting;
447 void *sent;
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
451 if (status)
452 return;
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
455 if (!sent)
456 return;
458 setting = get_unaligned_le16(sent);
460 if (hdev->voice_setting == setting)
461 return;
463 hdev->voice_setting = setting;
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
467 if (hdev->notify)
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
472 struct sk_buff *skb)
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
478 if (rp->status)
479 return;
481 hdev->num_iac = rp->num_iac;
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
494 if (!sent)
495 return;
497 hci_dev_lock(hdev);
499 if (!status) {
500 if (sent->mode)
501 hdev->features[1][0] |= LMP_HOST_SSP;
502 else
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
508 else if (!status) {
509 if (sent->mode)
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
511 else
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
515 hci_dev_unlock(hdev);
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
526 if (!sent)
527 return;
529 hci_dev_lock(hdev);
531 if (!status) {
532 if (sent->support)
533 hdev->features[1][0] |= LMP_HOST_SC;
534 else
535 hdev->features[1][0] &= ~LMP_HOST_SC;
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
539 if (sent->support)
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
541 else
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
545 hci_dev_unlock(hdev);
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
554 if (rp->status)
555 return;
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 struct sk_buff *skb)
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
574 if (rp->status)
575 return;
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
582 static void hci_cc_read_local_features(struct hci_dev *hdev,
583 struct sk_buff *skb)
585 struct hci_rp_read_local_features *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
589 if (rp->status)
590 return;
592 memcpy(hdev->features, rp->features, 8);
594 /* Adjust default settings according to features
595 * supported by device. */
597 if (hdev->features[0][0] & LMP_3SLOT)
598 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
600 if (hdev->features[0][0] & LMP_5SLOT)
601 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
603 if (hdev->features[0][1] & LMP_HV2) {
604 hdev->pkt_type |= (HCI_HV2);
605 hdev->esco_type |= (ESCO_HV2);
608 if (hdev->features[0][1] & LMP_HV3) {
609 hdev->pkt_type |= (HCI_HV3);
610 hdev->esco_type |= (ESCO_HV3);
613 if (lmp_esco_capable(hdev))
614 hdev->esco_type |= (ESCO_EV3);
616 if (hdev->features[0][4] & LMP_EV4)
617 hdev->esco_type |= (ESCO_EV4);
619 if (hdev->features[0][4] & LMP_EV5)
620 hdev->esco_type |= (ESCO_EV5);
622 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
623 hdev->esco_type |= (ESCO_2EV3);
625 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
626 hdev->esco_type |= (ESCO_3EV3);
628 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
629 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
633 struct sk_buff *skb)
635 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
639 if (rp->status)
640 return;
642 if (hdev->max_page < rp->max_page)
643 hdev->max_page = rp->max_page;
645 if (rp->page < HCI_MAX_PAGES)
646 memcpy(hdev->features[rp->page], rp->features, 8);
649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
650 struct sk_buff *skb)
652 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
654 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
656 if (rp->status)
657 return;
659 hdev->flow_ctl_mode = rp->mode;
662 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
664 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
668 if (rp->status)
669 return;
671 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
672 hdev->sco_mtu = rp->sco_mtu;
673 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
674 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
676 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
677 hdev->sco_mtu = 64;
678 hdev->sco_pkts = 8;
681 hdev->acl_cnt = hdev->acl_pkts;
682 hdev->sco_cnt = hdev->sco_pkts;
684 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
685 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
688 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
690 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
692 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
694 if (rp->status)
695 return;
697 if (test_bit(HCI_INIT, &hdev->flags))
698 bacpy(&hdev->bdaddr, &rp->bdaddr);
700 if (hci_dev_test_flag(hdev, HCI_SETUP))
701 bacpy(&hdev->setup_addr, &rp->bdaddr);
704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
705 struct sk_buff *skb)
707 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
711 if (rp->status)
712 return;
714 if (test_bit(HCI_INIT, &hdev->flags)) {
715 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 hdev->page_scan_window = __le16_to_cpu(rp->window);
720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
721 struct sk_buff *skb)
723 u8 status = *((u8 *) skb->data);
724 struct hci_cp_write_page_scan_activity *sent;
726 BT_DBG("%s status 0x%2.2x", hdev->name, status);
728 if (status)
729 return;
731 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
732 if (!sent)
733 return;
735 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 hdev->page_scan_window = __le16_to_cpu(sent->window);
739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
740 struct sk_buff *skb)
742 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
746 if (rp->status)
747 return;
749 if (test_bit(HCI_INIT, &hdev->flags))
750 hdev->page_scan_type = rp->type;
753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
754 struct sk_buff *skb)
756 u8 status = *((u8 *) skb->data);
757 u8 *type;
759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
761 if (status)
762 return;
764 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
765 if (type)
766 hdev->page_scan_type = *type;
769 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
770 struct sk_buff *skb)
772 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
774 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
776 if (rp->status)
777 return;
779 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
780 hdev->block_len = __le16_to_cpu(rp->block_len);
781 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
783 hdev->block_cnt = hdev->num_blocks;
785 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
786 hdev->block_cnt, hdev->block_len);
789 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_clock *rp = (void *) skb->data;
792 struct hci_cp_read_clock *cp;
793 struct hci_conn *conn;
795 BT_DBG("%s", hdev->name);
797 if (skb->len < sizeof(*rp))
798 return;
800 if (rp->status)
801 return;
803 hci_dev_lock(hdev);
805 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
806 if (!cp)
807 goto unlock;
809 if (cp->which == 0x00) {
810 hdev->clock = le32_to_cpu(rp->clock);
811 goto unlock;
814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
815 if (conn) {
816 conn->clock = le32_to_cpu(rp->clock);
817 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
820 unlock:
821 hci_dev_unlock(hdev);
824 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
825 struct sk_buff *skb)
827 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
831 if (rp->status)
832 return;
834 hdev->amp_status = rp->amp_status;
835 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
836 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
837 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
838 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
839 hdev->amp_type = rp->amp_type;
840 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
841 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
842 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
843 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
847 struct sk_buff *skb)
849 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
853 if (rp->status)
854 return;
856 hdev->inq_tx_power = rp->tx_power;
859 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
861 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
862 struct hci_cp_pin_code_reply *cp;
863 struct hci_conn *conn;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
867 hci_dev_lock(hdev);
869 if (hci_dev_test_flag(hdev, HCI_MGMT))
870 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
872 if (rp->status)
873 goto unlock;
875 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
876 if (!cp)
877 goto unlock;
879 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
880 if (conn)
881 conn->pin_length = cp->pin_len;
883 unlock:
884 hci_dev_unlock(hdev);
887 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
889 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
891 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
893 hci_dev_lock(hdev);
895 if (hci_dev_test_flag(hdev, HCI_MGMT))
896 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
897 rp->status);
899 hci_dev_unlock(hdev);
902 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
903 struct sk_buff *skb)
905 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909 if (rp->status)
910 return;
912 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
913 hdev->le_pkts = rp->le_max_pkt;
915 hdev->le_cnt = hdev->le_pkts;
917 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
921 struct sk_buff *skb)
923 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927 if (rp->status)
928 return;
930 memcpy(hdev->le_features, rp->features, 8);
933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
934 struct sk_buff *skb)
936 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
940 if (rp->status)
941 return;
943 hdev->adv_tx_power = rp->tx_power;
946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
950 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
952 hci_dev_lock(hdev);
954 if (hci_dev_test_flag(hdev, HCI_MGMT))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
956 rp->status);
958 hci_dev_unlock(hdev);
961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
962 struct sk_buff *skb)
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
966 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
968 hci_dev_lock(hdev);
970 if (hci_dev_test_flag(hdev, HCI_MGMT))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 ACL_LINK, 0, rp->status);
974 hci_dev_unlock(hdev);
977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983 hci_dev_lock(hdev);
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
987 0, rp->status);
989 hci_dev_unlock(hdev);
992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
993 struct sk_buff *skb)
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 hci_dev_lock(hdev);
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 ACL_LINK, 0, rp->status);
1005 hci_dev_unlock(hdev);
1008 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1011 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1017 struct sk_buff *skb)
1019 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1021 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1026 __u8 status = *((__u8 *) skb->data);
1027 bdaddr_t *sent;
1029 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1031 if (status)
1032 return;
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1035 if (!sent)
1036 return;
1038 hci_dev_lock(hdev);
1040 bacpy(&hdev->random_addr, sent);
1042 hci_dev_unlock(hdev);
1045 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1047 __u8 status = *((__u8 *) skb->data);
1048 struct hci_cp_le_set_default_phy *cp;
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1052 if (status)
1053 return;
1055 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1056 if (!cp)
1057 return;
1059 hci_dev_lock(hdev);
1061 hdev->le_tx_def_phys = cp->tx_phys;
1062 hdev->le_rx_def_phys = cp->rx_phys;
1064 hci_dev_unlock(hdev);
1067 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 __u8 status = *((__u8 *) skb->data);
1071 struct hci_cp_le_set_adv_set_rand_addr *cp;
1072 struct adv_info *adv_instance;
1074 if (status)
1075 return;
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1078 if (!cp)
1079 return;
1081 hci_dev_lock(hdev);
1083 if (!hdev->cur_adv_instance) {
1084 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1085 bacpy(&hdev->random_addr, &cp->bdaddr);
1086 } else {
1087 adv_instance = hci_find_adv_instance(hdev,
1088 hdev->cur_adv_instance);
1089 if (adv_instance)
1090 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1093 hci_dev_unlock(hdev);
1096 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1098 __u8 *sent, status = *((__u8 *) skb->data);
1100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1102 if (status)
1103 return;
1105 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1106 if (!sent)
1107 return;
1109 hci_dev_lock(hdev);
1111 /* If we're doing connection initiation as peripheral. Set a
1112 * timeout in case something goes wrong.
1114 if (*sent) {
1115 struct hci_conn *conn;
1117 hci_dev_set_flag(hdev, HCI_LE_ADV);
1119 conn = hci_lookup_le_connect(hdev);
1120 if (conn)
1121 queue_delayed_work(hdev->workqueue,
1122 &conn->le_conn_timeout,
1123 conn->conn_timeout);
1124 } else {
1125 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1128 hci_dev_unlock(hdev);
1131 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1132 struct sk_buff *skb)
1134 struct hci_cp_le_set_ext_adv_enable *cp;
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1139 if (status)
1140 return;
1142 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1143 if (!cp)
1144 return;
1146 hci_dev_lock(hdev);
1148 if (cp->enable) {
1149 struct hci_conn *conn;
1151 hci_dev_set_flag(hdev, HCI_LE_ADV);
1153 conn = hci_lookup_le_connect(hdev);
1154 if (conn)
1155 queue_delayed_work(hdev->workqueue,
1156 &conn->le_conn_timeout,
1157 conn->conn_timeout);
1158 } else {
1159 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1162 hci_dev_unlock(hdev);
1165 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1167 struct hci_cp_le_set_scan_param *cp;
1168 __u8 status = *((__u8 *) skb->data);
1170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1172 if (status)
1173 return;
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1176 if (!cp)
1177 return;
1179 hci_dev_lock(hdev);
1181 hdev->le_scan_type = cp->type;
1183 hci_dev_unlock(hdev);
1186 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_le_set_ext_scan_params *cp;
1190 __u8 status = *((__u8 *) skb->data);
1191 struct hci_cp_le_scan_phy_params *phy_param;
1193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1195 if (status)
1196 return;
1198 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1199 if (!cp)
1200 return;
1202 phy_param = (void *)cp->data;
1204 hci_dev_lock(hdev);
1206 hdev->le_scan_type = phy_param->type;
1208 hci_dev_unlock(hdev);
1211 static bool has_pending_adv_report(struct hci_dev *hdev)
1213 struct discovery_state *d = &hdev->discovery;
1215 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1218 static void clear_pending_adv_report(struct hci_dev *hdev)
1220 struct discovery_state *d = &hdev->discovery;
1222 bacpy(&d->last_adv_addr, BDADDR_ANY);
1223 d->last_adv_data_len = 0;
1226 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 u8 bdaddr_type, s8 rssi, u32 flags,
1228 u8 *data, u8 len)
1230 struct discovery_state *d = &hdev->discovery;
1232 bacpy(&d->last_adv_addr, bdaddr);
1233 d->last_adv_addr_type = bdaddr_type;
1234 d->last_adv_rssi = rssi;
1235 d->last_adv_flags = flags;
1236 memcpy(d->last_adv_data, data, len);
1237 d->last_adv_data_len = len;
1240 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1242 hci_dev_lock(hdev);
1244 switch (enable) {
1245 case LE_SCAN_ENABLE:
1246 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1247 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1248 clear_pending_adv_report(hdev);
1249 break;
1251 case LE_SCAN_DISABLE:
1252 /* We do this here instead of when setting DISCOVERY_STOPPED
1253 * since the latter would potentially require waiting for
1254 * inquiry to stop too.
1256 if (has_pending_adv_report(hdev)) {
1257 struct discovery_state *d = &hdev->discovery;
1259 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1260 d->last_adv_addr_type, NULL,
1261 d->last_adv_rssi, d->last_adv_flags,
1262 d->last_adv_data,
1263 d->last_adv_data_len, NULL, 0);
1266 /* Cancel this timer so that we don't try to disable scanning
1267 * when it's already disabled.
1269 cancel_delayed_work(&hdev->le_scan_disable);
1271 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1273 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1274 * interrupted scanning due to a connect request. Mark
1275 * therefore discovery as stopped. If this was not
1276 * because of a connect request advertising might have
1277 * been disabled because of active scanning, so
1278 * re-enable it again if necessary.
1280 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1281 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1282 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1283 hdev->discovery.state == DISCOVERY_FINDING)
1284 hci_req_reenable_advertising(hdev);
1286 break;
1288 default:
1289 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1290 enable);
1291 break;
1294 hci_dev_unlock(hdev);
1297 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1300 struct hci_cp_le_set_scan_enable *cp;
1301 __u8 status = *((__u8 *) skb->data);
1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305 if (status)
1306 return;
1308 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1309 if (!cp)
1310 return;
1312 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE (extended
 * LE scanning).  Funnels into the same le_set_scan_enable_complete()
 * logic as the legacy variant above.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: cache the controller's advertising-set count on success.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
/* Command Complete handler for LE Read White List Size: cache the
 * controller's white list capacity on success.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
/* Command Complete handler for LE Clear White List: drop the host-side
 * mirror of the controller's white list once the controller confirms.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
/* Command Complete handler for LE Add Device To White List: mirror the
 * successfully added entry into the host-side le_white_list, using the
 * address/type taken from the command we actually sent.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
/* Command Complete handler for LE Remove Device From White List: drop
 * the matching entry from the host-side mirror on success.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
/* Command Complete handler for LE Read Supported States: cache the
 * 8-byte supported-states bitmap reported by the controller.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}
/* Command Complete handler for LE Read Default Data Length: cache the
 * default TX octet count and time (converted from little endian).
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, update the cached defaults from the values we sent (the
 * reply itself carries only a status).
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
/* Command Complete handler for LE Clear Resolving List: drop the
 * host-side mirror of the controller's resolving list on success.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
/* Command Complete handler for LE Read Resolving List Size: cache the
 * controller's resolving list capacity on success.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}
/* Command Complete handler for LE Set Address Resolution Enable: track
 * whether link-layer RPA resolution is active via HCI_LL_RPA_RESOLUTION,
 * based on the enable byte we sent (the reply only carries a status).
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's TX/RX octet and time maxima.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}
/* Command Complete handler for Write LE Host Supported: keep the local
 * extended-features page 1 (LE Supported (Host) / Simultaneous LE and
 * BR/EDR (Host)) and the HCI_LE_ENABLED flag in sync with what we sent.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* Disabling LE host support also invalidates advertising */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
/* Command Complete handler for LE Set Advertising Parameters: remember
 * which own-address type the controller is advertising with.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * record the own-address type and the TX power the controller selected
 * (per-instance, or in hdev for the default instance 0), then refresh
 * the advertising data now that the TX power is known.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
/* Command Complete handler for Read RSSI: store the reported RSSI on
 * the connection identified by the handle in the reply.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
/* Command Complete handler for Read Transmit Power Level: route the
 * reported power into conn->tx_power (type 0x00, current) or
 * conn->max_tx_power (type 0x01, maximum), based on the type byte of
 * the command we sent.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Command Complete handler for Write SSP Debug Mode: cache the mode
 * byte we sent once the controller accepts it.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
/* Command Status handler for Inquiry: mark inquiry as in progress on
 * success; on failure, re-check pending connection attempts that were
 * waiting for the inquiry to start.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}
/* Command Status handler for Create Connection.  On failure, tear down
 * (or retry) the matching BT_CONNECT hci_conn; on success, make sure an
 * hci_conn object exists for the outgoing ACL link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c (Command Disallowed) is retried up to two
			 * times via BT_CONNECT2; anything else kills the
			 * connection attempt.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Add SCO Connection.  Only failures need
 * work here: close and delete the SCO link hanging off the ACL
 * connection whose handle was in the failed command.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Authentication Requested.  On failure,
 * notify and drop a connection still in BT_CONFIG; success is handled
 * by the later Authentication Complete event.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Set Connection Encryption.  On failure,
 * notify and drop a connection still in BT_CONFIG; success is handled
 * by the later Encryption Change event.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1814 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1815 struct hci_conn *conn)
1817 if (conn->state != BT_CONFIG || !conn->out)
1818 return 0;
1820 if (conn->pending_sec_level == BT_SECURITY_SDP)
1821 return 0;
1823 /* Only request authentication for SSP connections or non-SSP
1824 * devices with sec_level MEDIUM or HIGH or if MITM protection
1825 * is requested.
1827 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1828 conn->pending_sec_level != BT_SECURITY_FIPS &&
1829 conn->pending_sec_level != BT_SECURITY_HIGH &&
1830 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1831 return 0;
1833 return 1;
/* Send a Remote Name Request for the given inquiry cache entry, reusing
 * the page-scan parameters and clock offset learned during inquiry.
 * Returns the result of hci_send_cmd() (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
/* Kick off name resolution for the next NAME_NEEDED entry on the
 * discovery resolve list.  Returns true if a request was sent (entry
 * moves to NAME_PENDING), false if nothing is left or the send failed.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
/* Process the outcome of a remote name request for @bdaddr: report the
 * name (or lack of one) to mgmt, update the inquiry cache entry, and
 * either continue resolving the next pending name or finish discovery.
 * @name is NULL when the request failed; @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices whose names
	 * are pending, there is no need to continue resolving a next name as
	 * it will be done upon receiving another Remote Name Request Complete
	 * event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
/* Command Status handler for Remote Name Request.  Failures are handled
 * immediately: let mgmt name-resolution state machinery know, and if the
 * connection still needs authentication, request it now since no name
 * request complete event will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Command Status handler for Read Remote Supported Features.  On
 * failure, notify and drop a connection still in BT_CONFIG.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Read Remote Extended Features.  On
 * failure, notify and drop a connection still in BT_CONFIG.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Setup Synchronous Connection.  Only
 * failures need work: close and delete the SCO link hanging off the
 * ACL connection whose handle was in the failed command.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Sniff Mode.  On failure, clear the pending
 * mode-change flag and resume any SCO setup that was waiting on it.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Exit Sniff Mode.  Mirrors
 * hci_cs_sniff_mode(): on failure, clear the pending mode-change flag
 * and resume any SCO setup that was waiting on it.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for Disconnect.  On failure, report the failed
 * disconnect to mgmt; success is handled by the later Disconnection
 * Complete event.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
/* Common Command Status bookkeeping for LE Create Connection and LE
 * Extended Create Connection: record the initiator/responder addresses
 * (needed later by SMP) on the matching hci_conn, and arm a timeout for
 * direct (non white list) connection attempts.  Must be called with
 * hdev locked.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
/* Command Status handler for LE Create Connection: on success, delegate
 * address/timeout bookkeeping to cs_le_create_conn().
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
/* Command Status handler for LE Extended Create Connection: on success,
 * delegate address/timeout bookkeeping to cs_le_create_conn().
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}
/* Command Status handler for LE Read Remote Features.  On failure,
 * notify and drop a connection still in BT_CONFIG.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
/* Command Status handler for LE Start Encryption.  A failure here means
 * encryption could not even be initiated, so disconnect a still
 * connected link with an authentication-failure reason.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
/* Command Status handler for Switch Role.  On failure, clear the
 * pending role-switch flag so further role changes are not blocked.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
/* Inquiry Complete event handler: clear the HCI_INQUIRY flag (waking
 * any waiters), then, under mgmt, either move discovery into name
 * resolution or mark it stopped, taking simultaneous LE scanning into
 * account.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2352 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2354 struct inquiry_data data;
2355 struct inquiry_info *info = (void *) (skb->data + 1);
2356 int num_rsp = *((__u8 *) skb->data);
2358 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2360 if (!num_rsp)
2361 return;
2363 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2364 return;
2366 hci_dev_lock(hdev);
2368 for (; num_rsp; num_rsp--, info++) {
2369 u32 flags;
2371 bacpy(&data.bdaddr, &info->bdaddr);
2372 data.pscan_rep_mode = info->pscan_rep_mode;
2373 data.pscan_period_mode = info->pscan_period_mode;
2374 data.pscan_mode = info->pscan_mode;
2375 memcpy(data.dev_class, info->dev_class, 3);
2376 data.clock_offset = info->clock_offset;
2377 data.rssi = HCI_RSSI_INVALID;
2378 data.ssp_mode = 0x00;
2380 flags = hci_inquiry_cache_update(hdev, &data, false);
2382 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2383 info->dev_class, HCI_RSSI_INVALID,
2384 flags, NULL, 0, NULL, 0);
2387 hci_dev_unlock(hdev);
/* Connection Complete event handler: finalize an ACL/SCO connection
 * attempt.  On success, assign the handle, set up state/timeouts,
 * create debugfs/sysfs entries, and kick off remote feature discovery;
 * on failure, close the connection and inform mgmt.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * controller, so retry the lookup as ESCO_LINK and fix up
		 * the link type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming non-SSP links without a stored link key
			 * are about to pair; give them the longer pairing
			 * disconnect timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2475 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2477 struct hci_cp_reject_conn_req cp;
2479 bacpy(&cp.bdaddr, bdaddr);
2480 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2481 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler: decide whether to accept an
 * incoming ACL/SCO/eSCO connection.  Requests are rejected when the
 * protocol layers don't accept, when the peer is blacklisted, or (under
 * mgmt) when the device is neither connectable nor whitelisted.
 * Accepted requests either get an immediate Accept command or are
 * deferred to the protocol layer via HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer if we have one */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Let the protocol layer decide; it will accept later */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2579 static u8 hci_to_mgmt_reason(u8 err)
2581 switch (err) {
2582 case HCI_ERROR_CONNECTION_TIMEOUT:
2583 return MGMT_DEV_DISCONN_TIMEOUT;
2584 case HCI_ERROR_REMOTE_USER_TERM:
2585 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2586 case HCI_ERROR_REMOTE_POWER_OFF:
2587 return MGMT_DEV_DISCONN_REMOTE;
2588 case HCI_ERROR_LOCAL_HOST_TERM:
2589 return MGMT_DEV_DISCONN_LOCAL_HOST;
2590 default:
2591 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler: report the disconnect (or a
 * failed disconnect attempt) to mgmt, clean up link keys and scan
 * state for ACL links, re-queue auto-connect parameters for LE peers,
 * tear down the hci_conn, and re-enable advertising when an LE link
 * had suppressed it.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed by hci_conn_del(); remember the type first */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the HCI Authentication Complete event: update the connection's
 * auth/security flags, notify listeners, and kick off link encryption
 * where the connection setup or a pending request demands it.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (pre-SSP) devices cannot be re-authenticated, so do
		 * not raise the auth/security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, successful SSP auth is followed by enabling
		 * encryption before the connection is reported up.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Encryption was requested while authentication was in flight */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the HCI Remote Name Request Complete event: forward the name
 * (or failure) to the pending-name machinery and, if the connection
 * still requires it, start outgoing authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution results are only of interest to mgmt */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Completion callback for HCI_Read_Encryption_Key_Size: record the key
 * size on the connection (falling back to the maximum on error) and then
 * deliver the deferred connect/encrypt notification.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		u8 encrypt;

		/* 0x00 = off, 0x01 = E0/legacy, 0x02 = AES-CCM */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the HCI Encryption Change event: update the connection's
 * encryption-related flags, enforce Secure Connections Only policy,
 * read the encryption key size for ACL links, and notify listeners.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption upgrade on an established link tears the
	 * link down rather than leaving it unencrypted.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete() */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2953 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2954 struct sk_buff *skb)
2956 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2957 struct hci_conn *conn;
2959 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2961 hci_dev_lock(hdev);
2963 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2964 if (conn) {
2965 if (!ev->status)
2966 set_bit(HCI_CONN_SECURE, &conn->flags);
2968 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2970 hci_key_change_cfm(conn, ev->status);
2973 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event: store
 * page 0 of the remote feature mask and continue connection setup with
 * extended features, remote name request or the final connect callback.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only drive setup forward while the connection is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 next */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle the HCI Command Complete event: dispatch the response to the
 * per-opcode completion handler, refresh the command-flow accounting
 * (cmd_timer / cmd_cnt) and complete any pending request.
 *
 * On return, *opcode and *status identify the completed command, and
 * *req_complete / *req_complete_skb may be set by hci_req_cmd_complete()
 * for the caller to invoke outside this function.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is the command status */
	*status = skb->data[sizeof(*ev)];

	/* Strip the event header so handlers see only return parameters */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command completed, so the timeout watchdog can be stopped */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-open the command window unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Kick the TX path if more commands are queued and allowed */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
/* Handle the HCI Command Status event: dispatch the status to the
 * per-opcode handler, refresh command-flow accounting and flag request
 * completion for commands that will not get a Command Complete event.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command was acknowledged; stop the timeout watchdog */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-open the command window unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Kick the TX path if more commands are queued and allowed */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3471 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3473 struct hci_ev_hardware_error *ev = (void *) skb->data;
3475 hdev->hw_error_code = ev->code;
3477 queue_work(hdev->req_workqueue, &hdev->error_reset);
3480 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3482 struct hci_ev_role_change *ev = (void *) skb->data;
3483 struct hci_conn *conn;
3485 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3487 hci_dev_lock(hdev);
3489 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3490 if (conn) {
3491 if (!ev->status)
3492 conn->role = ev->role;
3494 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3496 hci_role_switch_cfm(conn, ev->status, ev->role);
3499 hci_dev_unlock(hdev);
/* Handle the HCI Number of Completed Packets event (packet-based flow
 * control): credit back per-connection and per-device packet quotas and
 * restart the TX worker.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits to the matching pool, capped at its size */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool for LE traffic.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3569 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3570 __u16 handle)
3572 struct hci_chan *chan;
3574 switch (hdev->dev_type) {
3575 case HCI_PRIMARY:
3576 return hci_conn_hash_lookup_handle(hdev, handle);
3577 case HCI_AMP:
3578 chan = hci_chan_lookup_handle(hdev, handle);
3579 if (chan)
3580 return chan->conn;
3581 break;
3582 default:
3583 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3584 break;
3587 return NULL;
/* Handle the HCI Number of Completed Data Blocks event (block-based flow
 * control): credit back per-connection sent counts and the shared block
 * pool, then restart the TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles are channel handles, hence the special lookup */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Blocks come from a single shared pool, capped */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Handle the HCI Mode Change event: track active/sniff mode transitions
 * on the connection and finish any SCO setup that was waiting on an
 * unsolicited mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only track power-save state for changes the host did not
		 * itself request.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
/* Handle the HCI PIN Code Request event: reject the request when pairing
 * is not allowed, otherwise forward it to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout to give pairing time to finish */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Refuse remote-initiated pairing when we are not bondable */
	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* A 16-digit (secure) PIN is needed for high security */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3707 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3709 if (key_type == HCI_LK_CHANGED_COMBINATION)
3710 return;
3712 conn->pin_length = pin_len;
3713 conn->key_type = key_type;
3715 switch (key_type) {
3716 case HCI_LK_LOCAL_UNIT:
3717 case HCI_LK_REMOTE_UNIT:
3718 case HCI_LK_DEBUG_COMBINATION:
3719 return;
3720 case HCI_LK_COMBINATION:
3721 if (pin_len == 16)
3722 conn->pending_sec_level = BT_SECURITY_HIGH;
3723 else
3724 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3725 break;
3726 case HCI_LK_UNAUTH_COMBINATION_P192:
3727 case HCI_LK_UNAUTH_COMBINATION_P256:
3728 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3729 break;
3730 case HCI_LK_AUTH_COMBINATION_P192:
3731 conn->pending_sec_level = BT_SECURITY_HIGH;
3732 break;
3733 case HCI_LK_AUTH_COMBINATION_P256:
3734 conn->pending_sec_level = BT_SECURITY_FIPS;
3735 break;
/* Handle the HCI Link Key Request event: look up a stored key for the
 * remote device, reject keys too weak for the connection's pending
 * security level, and reply with the key or a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only handled when mgmt is in control */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key must not satisfy an auth_type
		 * that demands MITM protection (low bit set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs are too weak for high
		 * or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
/* Handle the HCI Link Key Notification event: record the new key on the
 * connection, store it (subject to debug-key policy) and tell mgmt
 * whether the key should persist.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while key handling completes */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is nothing to store or notify */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection drops */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3859 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3861 struct hci_ev_clock_offset *ev = (void *) skb->data;
3862 struct hci_conn *conn;
3864 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3866 hci_dev_lock(hdev);
3868 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3869 if (conn && !ev->status) {
3870 struct inquiry_entry *ie;
3872 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3873 if (ie) {
3874 ie->data.clock_offset = ev->clock_offset;
3875 ie->timestamp = jiffies;
3879 hci_dev_unlock(hdev);
3882 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3884 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3885 struct hci_conn *conn;
3887 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3889 hci_dev_lock(hdev);
3891 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3892 if (conn && !ev->status)
3893 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3895 hci_dev_unlock(hdev);
3898 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3900 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3901 struct inquiry_entry *ie;
3903 BT_DBG("%s", hdev->name);
3905 hci_dev_lock(hdev);
3907 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3908 if (ie) {
3909 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3910 ie->timestamp = jiffies;
3913 hci_dev_unlock(hdev);
3916 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3917 struct sk_buff *skb)
3919 struct inquiry_data data;
3920 int num_rsp = *((__u8 *) skb->data);
3922 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3924 if (!num_rsp)
3925 return;
3927 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3928 return;
3930 hci_dev_lock(hdev);
3932 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3933 struct inquiry_info_with_rssi_and_pscan_mode *info;
3934 info = (void *) (skb->data + 1);
3936 for (; num_rsp; num_rsp--, info++) {
3937 u32 flags;
3939 bacpy(&data.bdaddr, &info->bdaddr);
3940 data.pscan_rep_mode = info->pscan_rep_mode;
3941 data.pscan_period_mode = info->pscan_period_mode;
3942 data.pscan_mode = info->pscan_mode;
3943 memcpy(data.dev_class, info->dev_class, 3);
3944 data.clock_offset = info->clock_offset;
3945 data.rssi = info->rssi;
3946 data.ssp_mode = 0x00;
3948 flags = hci_inquiry_cache_update(hdev, &data, false);
3950 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3951 info->dev_class, info->rssi,
3952 flags, NULL, 0, NULL, 0);
3954 } else {
3955 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3957 for (; num_rsp; num_rsp--, info++) {
3958 u32 flags;
3960 bacpy(&data.bdaddr, &info->bdaddr);
3961 data.pscan_rep_mode = info->pscan_rep_mode;
3962 data.pscan_period_mode = info->pscan_period_mode;
3963 data.pscan_mode = 0x00;
3964 memcpy(data.dev_class, info->dev_class, 3);
3965 data.clock_offset = info->clock_offset;
3966 data.rssi = info->rssi;
3967 data.ssp_mode = 0x00;
3969 flags = hci_inquiry_cache_update(hdev, &data, false);
3971 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3972 info->dev_class, info->rssi,
3973 flags, NULL, 0, NULL, 0);
3977 hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event: store the
 * requested feature page, derive the SSP/SC state from page 1 host
 * features, and continue connection setup (remote name request or
 * mgmt connected notification) while the link is still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC feature bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* HCI Synchronous Connection Complete event: finalize SCO/eSCO setup.
 * On a known retry-able failure status for an outgoing link, fall back
 * to a more conservative packet type and retry before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;
		conn->type = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a downgraded packet type; if the retry
			 * was issued successfully, keep the connection alive.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4111 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4113 size_t parsed = 0;
4115 while (parsed < eir_len) {
4116 u8 field_len = eir[0];
4118 if (field_len == 0)
4119 return parsed;
4121 parsed += field_len + 1;
4122 eir += field_len + 1;
4125 return eir_len;
4128 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4129 struct sk_buff *skb)
4131 struct inquiry_data data;
4132 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4133 int num_rsp = *((__u8 *) skb->data);
4134 size_t eir_len;
4136 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4138 if (!num_rsp)
4139 return;
4141 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4142 return;
4144 hci_dev_lock(hdev);
4146 for (; num_rsp; num_rsp--, info++) {
4147 u32 flags;
4148 bool name_known;
4150 bacpy(&data.bdaddr, &info->bdaddr);
4151 data.pscan_rep_mode = info->pscan_rep_mode;
4152 data.pscan_period_mode = info->pscan_period_mode;
4153 data.pscan_mode = 0x00;
4154 memcpy(data.dev_class, info->dev_class, 3);
4155 data.clock_offset = info->clock_offset;
4156 data.rssi = info->rssi;
4157 data.ssp_mode = 0x01;
4159 if (hci_dev_test_flag(hdev, HCI_MGMT))
4160 name_known = eir_get_data(info->data,
4161 sizeof(info->data),
4162 EIR_NAME_COMPLETE, NULL);
4163 else
4164 name_known = true;
4166 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4168 eir_len = eir_get_length(info->data, sizeof(info->data));
4170 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4171 info->dev_class, info->rssi,
4172 flags, info->data, eir_len, NULL, 0);
4175 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event: for LE links, promote the
 * pending security level on success or tear the link down on failure.
 * BR/EDR links are handled via the auth_complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means authentication
	 * failed; disconnect rather than keep an insecure link.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold + drop refreshes the disconnect timeout without
		 * leaking a reference.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4228 static u8 hci_get_auth_req(struct hci_conn *conn)
4230 /* If remote requests no-bonding follow that lead */
4231 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4232 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4233 return conn->remote_auth | (conn->auth_type & 0x01);
4235 /* If both remote and local have enough IO capabilities, require
4236 * MITM protection
4238 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4239 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4240 return conn->remote_auth | 0x01;
4242 /* No MITM protection possible so ignore remote requirement */
4243 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB Data Present value (0x00/0x01/0x02) to report in
 * an IO Capability Reply, based on which stored OOB values (P-192
 * and/or P-256) are actually usable for this connection.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
/* HCI IO Capability Request event: decide whether pairing is allowed
 * and, if so, reply with our IO capability, authentication requirement
 * and OOB data presence; otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing.
	 * NOTE(review): the matching drop appears to happen in the
	 * pairing-complete path, not in this function.
	 */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4357 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4359 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4360 struct hci_conn *conn;
4362 BT_DBG("%s", hdev->name);
4364 hci_dev_lock(hdev);
4366 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4367 if (!conn)
4368 goto unlock;
4370 conn->remote_cap = ev->capability;
4371 conn->remote_auth = ev->authentication;
4373 unlock:
4374 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event: decide between rejecting the
 * pairing (MITM required but impossible), auto-accepting (no side
 * requires MITM), or forwarding the request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept to work around remote
		 * stacks that misbehave on an immediate reply.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4452 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4453 struct sk_buff *skb)
4455 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4457 BT_DBG("%s", hdev->name);
4459 if (hci_dev_test_flag(hdev, HCI_MGMT))
4460 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4463 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4464 struct sk_buff *skb)
4466 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4467 struct hci_conn *conn;
4469 BT_DBG("%s", hdev->name);
4471 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4472 if (!conn)
4473 return;
4475 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4476 conn->passkey_entered = 0;
4478 if (hci_dev_test_flag(hdev, HCI_MGMT))
4479 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4480 conn->dst_type, conn->passkey_notify,
4481 conn->passkey_entered);
4484 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4486 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4487 struct hci_conn *conn;
4489 BT_DBG("%s", hdev->name);
4491 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4492 if (!conn)
4493 return;
4495 switch (ev->type) {
4496 case HCI_KEYPRESS_STARTED:
4497 conn->passkey_entered = 0;
4498 return;
4500 case HCI_KEYPRESS_ENTERED:
4501 conn->passkey_entered++;
4502 break;
4504 case HCI_KEYPRESS_ERASED:
4505 conn->passkey_entered--;
4506 break;
4508 case HCI_KEYPRESS_CLEARED:
4509 conn->passkey_entered = 0;
4510 break;
4512 case HCI_KEYPRESS_COMPLETED:
4513 return;
4516 if (hci_dev_test_flag(hdev, HCI_MGMT))
4517 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4518 conn->dst_type, conn->passkey_notify,
4519 conn->passkey_entered);
/* HCI Simple Pairing Complete event: reset the cached remote
 * authentication requirement and report pairing failures to user
 * space when we were not the initiator.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Releases the hold taken when pairing started */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4553 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4554 struct sk_buff *skb)
4556 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4557 struct inquiry_entry *ie;
4558 struct hci_conn *conn;
4560 BT_DBG("%s", hdev->name);
4562 hci_dev_lock(hdev);
4564 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4565 if (conn)
4566 memcpy(conn->features[1], ev->features, 8);
4568 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4569 if (ie)
4570 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4572 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: reply with the stored OOB data
 * for the peer — extended (P-192 + P-256) when Secure Connections is
 * enabled, legacy (P-192 only) otherwise — or send a negative reply
 * when no data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the P-192 values must
		 * not be used, so send them zeroed.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4629 #if IS_ENABLED(CONFIG_BT_HS)
4630 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4632 struct hci_ev_channel_selected *ev = (void *)skb->data;
4633 struct hci_conn *hcon;
4635 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4637 skb_pull(skb, sizeof(*ev));
4639 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4640 if (!hcon)
4641 return;
4643 amp_read_loc_assoc_final_data(hdev, hcon);
4646 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4647 struct sk_buff *skb)
4649 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4650 struct hci_conn *hcon, *bredr_hcon;
4652 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4653 ev->status);
4655 hci_dev_lock(hdev);
4657 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4658 if (!hcon) {
4659 hci_dev_unlock(hdev);
4660 return;
4663 if (ev->status) {
4664 hci_conn_del(hcon);
4665 hci_dev_unlock(hdev);
4666 return;
4669 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4671 hcon->state = BT_CONNECTED;
4672 bacpy(&hcon->dst, &bredr_hcon->dst);
4674 hci_conn_hold(hcon);
4675 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4676 hci_conn_drop(hcon);
4678 hci_debugfs_create_conn(hcon);
4679 hci_conn_add_sysfs(hcon);
4681 amp_physical_cfm(bredr_hcon, hcon);
4683 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create the HCI channel for the new
 * logical link and confirm it to the L2CAP channel that requested the
 * AMP move.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The AMP link now carries the channel, so use the AMP
		 * controller's block MTU for the L2CAP connection.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4724 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4725 struct sk_buff *skb)
4727 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4728 struct hci_chan *hchan;
4730 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4731 le16_to_cpu(ev->handle), ev->status);
4733 if (ev->status)
4734 return;
4736 hci_dev_lock(hdev);
4738 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4739 if (!hchan)
4740 goto unlock;
4742 amp_destroy_logical_link(hchan, ev->reason);
4744 unlock:
4745 hci_dev_unlock(hdev);
4748 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4749 struct sk_buff *skb)
4751 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4752 struct hci_conn *hcon;
4754 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4756 if (ev->status)
4757 return;
4759 hci_dev_lock(hdev);
4761 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4762 if (hcon) {
4763 hcon->state = BT_CLOSED;
4764 hci_conn_del(hcon);
4767 hci_dev_unlock(hdev);
4769 #endif
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events: create or finish the hci_conn, fix up initiator and
 * responder addresses, resolve RPAs via stored IRKs, and either fail
 * the connection or continue setup (remote feature read / connected
 * notification). Also cleans up any pending auto-connect parameters.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
	bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
	u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt succeeded; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
		}
	} else {
		hci_connect_cfm(conn, status);
	}

	/* A pending auto-connect entry for this device has now been
	 * consumed; drop its action and any stored connection reference.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4937 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4939 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4941 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4943 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4944 ev->role, le16_to_cpu(ev->handle),
4945 le16_to_cpu(ev->interval),
4946 le16_to_cpu(ev->latency),
4947 le16_to_cpu(ev->supervision_timeout));
4950 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
4951 struct sk_buff *skb)
4953 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
4955 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4957 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4958 ev->role, le16_to_cpu(ev->handle),
4959 le16_to_cpu(ev->interval),
4960 le16_to_cpu(ev->latency),
4961 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Advertising Set Terminated event: when the set was terminated
 * because a connection was created, fix up the connection's responder
 * address to the random address that was actually being advertised
 * (either the device's random address or the instance's own one).
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		/* Only a random advertising address needs fixing up */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
			return;

		/* Instance 0 advertises with the device random address */
		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}
4992 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4993 struct sk_buff *skb)
4995 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4996 struct hci_conn *conn;
4998 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5000 if (ev->status)
5001 return;
5003 hci_dev_lock(hdev);
5005 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5006 if (conn) {
5007 conn->le_conn_interval = le16_to_cpu(ev->interval);
5008 conn->le_conn_latency = le16_to_cpu(ev->latency);
5009 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5012 hci_dev_unlock(hdev);
/* This function requires the caller holds hdev->lock.
 *
 * Given an incoming advertising report, decide whether it should
 * trigger an outgoing LE connection attempt (auto-connect policy) and
 * initiate it. Returns the resulting connection, or NULL when no
 * attempt was made or it failed.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
/* Core handler for a single (legacy-format) advertising report.
 *
 * Validates the report type and AD data length, optionally triggers a
 * pending LE connection, and translates the report into mgmt
 * device-found events, merging ADV_IND/ADV_SCAN_IND reports with a
 * subsequent SCAN_RSP from the same device where possible.
 *
 * @direct_addr: our own (RPA) address from an LE Direct Advertising
 *		 Report, or NULL for ordinary advertising reports.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	/* Only the five legacy advertising PDU types are valid here. */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
				     direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5301 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5303 u8 num_reports = skb->data[0];
5304 void *ptr = &skb->data[1];
5306 hci_dev_lock(hdev);
5308 while (num_reports--) {
5309 struct hci_ev_le_advertising_info *ev = ptr;
5310 s8 rssi;
5312 if (ev->length <= HCI_MAX_AD_LENGTH) {
5313 rssi = ev->data[ev->length];
5314 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5315 ev->bdaddr_type, NULL, 0, rssi,
5316 ev->data, ev->length);
5317 } else {
5318 bt_dev_err(hdev, "Dropping invalid advertising data");
5321 ptr += sizeof(*ev) + ev->length + 1;
5324 hci_dev_unlock(hdev);
5327 static u8 ext_evt_type_to_legacy(u16 evt_type)
5329 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5330 switch (evt_type) {
5331 case LE_LEGACY_ADV_IND:
5332 return LE_ADV_IND;
5333 case LE_LEGACY_ADV_DIRECT_IND:
5334 return LE_ADV_DIRECT_IND;
5335 case LE_LEGACY_ADV_SCAN_IND:
5336 return LE_ADV_SCAN_IND;
5337 case LE_LEGACY_NONCONN_IND:
5338 return LE_ADV_NONCONN_IND;
5339 case LE_LEGACY_SCAN_RSP_ADV:
5340 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5341 return LE_ADV_SCAN_RSP;
5344 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5345 evt_type);
5347 return LE_ADV_INVALID;
5350 if (evt_type & LE_EXT_ADV_CONN_IND) {
5351 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5352 return LE_ADV_DIRECT_IND;
5354 return LE_ADV_IND;
5357 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5358 return LE_ADV_SCAN_RSP;
5360 if (evt_type & LE_EXT_ADV_SCAN_IND)
5361 return LE_ADV_SCAN_IND;
5363 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5364 evt_type & LE_EXT_ADV_DIRECT_IND)
5365 return LE_ADV_NONCONN_IND;
5367 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5368 evt_type);
5370 return LE_ADV_INVALID;
5373 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5375 u8 num_reports = skb->data[0];
5376 void *ptr = &skb->data[1];
5378 hci_dev_lock(hdev);
5380 while (num_reports--) {
5381 struct hci_ev_le_ext_adv_report *ev = ptr;
5382 u8 legacy_evt_type;
5383 u16 evt_type;
5385 evt_type = __le16_to_cpu(ev->evt_type);
5386 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5387 if (legacy_evt_type != LE_ADV_INVALID) {
5388 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5389 ev->bdaddr_type, NULL, 0, ev->rssi,
5390 ev->data, ev->length);
5393 ptr += sizeof(*ev) + ev->length + 1;
5396 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event: store the remote LE feature
 * bits on the connection and, if the connection was still in BT_CONFIG,
 * complete its setup.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* Only cache the feature page on success. */
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			/* Drop the reference taken while in BT_CONFIG. */
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
/* LE Long Term Key Request event: look up a stored LTK matching the
 * connection and the EDiv/Rand in the request, and answer with either
 * HCI_OP_LE_LTK_REPLY (key found) or HCI_OP_LE_LTK_NEG_REPLY.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Zero-pad the key up to the reply field size. */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are single-use: remove the key after handing it
		 * to the controller.
		 */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5505 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5506 u8 reason)
5508 struct hci_cp_le_conn_param_req_neg_reply cp;
5510 cp.handle = cpu_to_le16(handle);
5511 cp.reason = reason;
5513 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5514 &cp);
/* LE Remote Connection Parameter Request event: validate the requested
 * parameters, remember them (and notify userspace via mgmt) when we are
 * master, and accept or reject the request towards the controller.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject out-of-spec parameter combinations outright. */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else{
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		/* Let userspace decide whether to persist the values. */
		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request echoing the remote's values back. */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
5575 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5576 struct sk_buff *skb)
5578 u8 num_reports = skb->data[0];
5579 void *ptr = &skb->data[1];
5581 hci_dev_lock(hdev);
5583 while (num_reports--) {
5584 struct hci_ev_le_direct_adv_info *ev = ptr;
5586 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5587 ev->bdaddr_type, &ev->direct_addr,
5588 ev->direct_addr_type, ev->rssi, NULL, 0);
5590 ptr += sizeof(*ev);
5593 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event to the handler for its subevent code.
 * Unknown subevents are silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Strip the meta-event header so handlers see their own payload. */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}
/* Check whether @skb (a pristine copy of the last received event) is
 * the completion event for @opcode, pulling headers as it goes so the
 * caller can hand the remaining payload to a request callback.
 *
 * If @event is non-zero, any event of that type counts as completion;
 * otherwise the skb must be a Command Complete event whose opcode
 * matches @opcode. Returns false when the skb should not be passed on.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
/* Main entry point for a received HCI event packet: dispatch it to the
 * per-event handler and, when the event completes a pending command or
 * request, invoke the stored completion callback.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the sent command asked to be completed by this specific
	 * event (rather than Command Complete/Status), resolve the
	 * request callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand the clone to the callback when it really is
		 * the matching completion event; otherwise pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;