mm/zsmalloc: allocate exactly size of struct zs_pool
[linux/fpc-iii.git] / net / bluetooth / hci_event.c
blob322abbbbcef991a36adaaf0750c723ceebba3147
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
37 /* Handle HCI Event packets */
39 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
41 __u8 status = *((__u8 *) skb->data);
43 BT_DBG("%s status 0x%2.2x", hdev->name, status);
45 if (status)
46 return;
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
50 wake_up_bit(&hdev->flags, HCI_INQUIRY);
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
56 hci_conn_check_pending(hdev);
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
61 __u8 status = *((__u8 *) skb->data);
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
65 if (status)
66 return;
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 __u8 status = *((__u8 *) skb->data);
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
77 if (status)
78 return;
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
82 hci_conn_check_pending(hdev);
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
88 BT_DBG("%s", hdev->name);
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
98 if (rp->status)
99 return;
101 hci_dev_lock(hdev);
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn)
105 conn->role = rp->role;
107 hci_dev_unlock(hdev);
110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
112 struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 struct hci_conn *conn;
115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
117 if (rp->status)
118 return;
120 hci_dev_lock(hdev);
122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 if (conn)
124 conn->link_policy = __le16_to_cpu(rp->policy);
126 hci_dev_unlock(hdev);
129 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
131 struct hci_rp_write_link_policy *rp = (void *) skb->data;
132 struct hci_conn *conn;
133 void *sent;
135 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
137 if (rp->status)
138 return;
140 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
141 if (!sent)
142 return;
144 hci_dev_lock(hdev);
146 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 if (conn)
148 conn->link_policy = get_unaligned_le16(sent + 2);
150 hci_dev_unlock(hdev);
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
160 if (rp->status)
161 return;
163 hdev->link_policy = __le16_to_cpu(rp->policy);
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 struct sk_buff *skb)
169 __u8 status = *((__u8 *) skb->data);
170 void *sent;
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
174 if (status)
175 return;
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent)
179 return;
181 hdev->link_policy = get_unaligned_le16(sent);
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 __u8 status = *((__u8 *) skb->data);
188 BT_DBG("%s status 0x%2.2x", hdev->name, status);
190 clear_bit(HCI_RESET, &hdev->flags);
192 if (status)
193 return;
195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
198 hdev->discovery.state = DISCOVERY_STOPPED;
199 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
200 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
202 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
203 hdev->adv_data_len = 0;
205 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
206 hdev->scan_rsp_data_len = 0;
208 hdev->le_scan_type = LE_SCAN_PASSIVE;
210 hdev->ssp_debug_mode = 0;
212 hci_bdaddr_list_clear(&hdev->le_white_list);
215 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
217 __u8 status = *((__u8 *) skb->data);
218 void *sent;
220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
223 if (!sent)
224 return;
226 hci_dev_lock(hdev);
228 if (test_bit(HCI_MGMT, &hdev->dev_flags))
229 mgmt_set_local_name_complete(hdev, sent, status);
230 else if (!status)
231 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
233 hci_dev_unlock(hdev);
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
238 struct hci_rp_read_local_name *rp = (void *) skb->data;
240 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
242 if (rp->status)
243 return;
245 if (test_bit(HCI_SETUP, &hdev->dev_flags))
246 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
249 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
251 __u8 status = *((__u8 *) skb->data);
252 void *sent;
254 BT_DBG("%s status 0x%2.2x", hdev->name, status);
256 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
257 if (!sent)
258 return;
260 if (!status) {
261 __u8 param = *((__u8 *) sent);
263 if (param == AUTH_ENABLED)
264 set_bit(HCI_AUTH, &hdev->flags);
265 else
266 clear_bit(HCI_AUTH, &hdev->flags);
269 if (test_bit(HCI_MGMT, &hdev->dev_flags))
270 mgmt_auth_enable_complete(hdev, status);
273 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
275 __u8 status = *((__u8 *) skb->data);
276 __u8 param;
277 void *sent;
279 BT_DBG("%s status 0x%2.2x", hdev->name, status);
281 if (status)
282 return;
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
288 param = *((__u8 *) sent);
290 if (param)
291 set_bit(HCI_ENCRYPT, &hdev->flags);
292 else
293 clear_bit(HCI_ENCRYPT, &hdev->flags);
296 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
299 __u8 param;
300 void *sent;
302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
304 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
305 if (!sent)
306 return;
308 param = *((__u8 *) sent);
310 hci_dev_lock(hdev);
312 if (status) {
313 hdev->discov_timeout = 0;
314 goto done;
317 if (param & SCAN_INQUIRY)
318 set_bit(HCI_ISCAN, &hdev->flags);
319 else
320 clear_bit(HCI_ISCAN, &hdev->flags);
322 if (param & SCAN_PAGE)
323 set_bit(HCI_PSCAN, &hdev->flags);
324 else
325 clear_bit(HCI_PSCAN, &hdev->flags);
327 done:
328 hci_dev_unlock(hdev);
331 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337 if (rp->status)
338 return;
340 memcpy(hdev->dev_class, rp->dev_class, 3);
342 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
343 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
346 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 __u8 status = *((__u8 *) skb->data);
349 void *sent;
351 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
354 if (!sent)
355 return;
357 hci_dev_lock(hdev);
359 if (status == 0)
360 memcpy(hdev->dev_class, sent, 3);
362 if (test_bit(HCI_MGMT, &hdev->dev_flags))
363 mgmt_set_class_of_dev_complete(hdev, sent, status);
365 hci_dev_unlock(hdev);
368 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
371 __u16 setting;
373 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375 if (rp->status)
376 return;
378 setting = __le16_to_cpu(rp->voice_setting);
380 if (hdev->voice_setting == setting)
381 return;
383 hdev->voice_setting = setting;
385 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387 if (hdev->notify)
388 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
391 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
392 struct sk_buff *skb)
394 __u8 status = *((__u8 *) skb->data);
395 __u16 setting;
396 void *sent;
398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400 if (status)
401 return;
403 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
404 if (!sent)
405 return;
407 setting = get_unaligned_le16(sent);
409 if (hdev->voice_setting == setting)
410 return;
412 hdev->voice_setting = setting;
414 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416 if (hdev->notify)
417 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
421 struct sk_buff *skb)
423 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427 if (rp->status)
428 return;
430 hdev->num_iac = rp->num_iac;
432 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
435 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 __u8 status = *((__u8 *) skb->data);
438 struct hci_cp_write_ssp_mode *sent;
440 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
443 if (!sent)
444 return;
446 if (!status) {
447 if (sent->mode)
448 hdev->features[1][0] |= LMP_HOST_SSP;
449 else
450 hdev->features[1][0] &= ~LMP_HOST_SSP;
453 if (test_bit(HCI_MGMT, &hdev->dev_flags))
454 mgmt_ssp_enable_complete(hdev, sent->mode, status);
455 else if (!status) {
456 if (sent->mode)
457 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458 else
459 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
463 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
465 u8 status = *((u8 *) skb->data);
466 struct hci_cp_write_sc_support *sent;
468 BT_DBG("%s status 0x%2.2x", hdev->name, status);
470 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
471 if (!sent)
472 return;
474 if (!status) {
475 if (sent->support)
476 hdev->features[1][0] |= LMP_HOST_SC;
477 else
478 hdev->features[1][0] &= ~LMP_HOST_SC;
481 if (test_bit(HCI_MGMT, &hdev->dev_flags))
482 mgmt_sc_enable_complete(hdev, sent->support, status);
483 else if (!status) {
484 if (sent->support)
485 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
486 else
487 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
491 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
493 struct hci_rp_read_local_version *rp = (void *) skb->data;
495 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
497 if (rp->status)
498 return;
500 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501 hdev->hci_ver = rp->hci_ver;
502 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503 hdev->lmp_ver = rp->lmp_ver;
504 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
509 static void hci_cc_read_local_commands(struct hci_dev *hdev,
510 struct sk_buff *skb)
512 struct hci_rp_read_local_commands *rp = (void *) skb->data;
514 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516 if (rp->status)
517 return;
519 if (test_bit(HCI_SETUP, &hdev->dev_flags))
520 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
523 static void hci_cc_read_local_features(struct hci_dev *hdev,
524 struct sk_buff *skb)
526 struct hci_rp_read_local_features *rp = (void *) skb->data;
528 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
530 if (rp->status)
531 return;
533 memcpy(hdev->features, rp->features, 8);
535 /* Adjust default settings according to features
536 * supported by device. */
538 if (hdev->features[0][0] & LMP_3SLOT)
539 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
541 if (hdev->features[0][0] & LMP_5SLOT)
542 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
544 if (hdev->features[0][1] & LMP_HV2) {
545 hdev->pkt_type |= (HCI_HV2);
546 hdev->esco_type |= (ESCO_HV2);
549 if (hdev->features[0][1] & LMP_HV3) {
550 hdev->pkt_type |= (HCI_HV3);
551 hdev->esco_type |= (ESCO_HV3);
554 if (lmp_esco_capable(hdev))
555 hdev->esco_type |= (ESCO_EV3);
557 if (hdev->features[0][4] & LMP_EV4)
558 hdev->esco_type |= (ESCO_EV4);
560 if (hdev->features[0][4] & LMP_EV5)
561 hdev->esco_type |= (ESCO_EV5);
563 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
564 hdev->esco_type |= (ESCO_2EV3);
566 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
567 hdev->esco_type |= (ESCO_3EV3);
569 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
570 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
573 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
574 struct sk_buff *skb)
576 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
578 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
580 if (rp->status)
581 return;
583 if (hdev->max_page < rp->max_page)
584 hdev->max_page = rp->max_page;
586 if (rp->page < HCI_MAX_PAGES)
587 memcpy(hdev->features[rp->page], rp->features, 8);
590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591 struct sk_buff *skb)
593 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
597 if (rp->status)
598 return;
600 hdev->flow_ctl_mode = rp->mode;
603 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
605 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
609 if (rp->status)
610 return;
612 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
613 hdev->sco_mtu = rp->sco_mtu;
614 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
615 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
617 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
618 hdev->sco_mtu = 64;
619 hdev->sco_pkts = 8;
622 hdev->acl_cnt = hdev->acl_pkts;
623 hdev->sco_cnt = hdev->sco_pkts;
625 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
626 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
629 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
631 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
633 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635 if (rp->status)
636 return;
638 if (test_bit(HCI_INIT, &hdev->flags))
639 bacpy(&hdev->bdaddr, &rp->bdaddr);
641 if (test_bit(HCI_SETUP, &hdev->dev_flags))
642 bacpy(&hdev->setup_addr, &rp->bdaddr);
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 struct sk_buff *skb)
648 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
652 if (rp->status)
653 return;
655 if (test_bit(HCI_INIT, &hdev->flags)) {
656 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 hdev->page_scan_window = __le16_to_cpu(rp->window);
661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662 struct sk_buff *skb)
664 u8 status = *((u8 *) skb->data);
665 struct hci_cp_write_page_scan_activity *sent;
667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
669 if (status)
670 return;
672 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673 if (!sent)
674 return;
676 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 hdev->page_scan_window = __le16_to_cpu(sent->window);
680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681 struct sk_buff *skb)
683 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
685 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687 if (rp->status)
688 return;
690 if (test_bit(HCI_INIT, &hdev->flags))
691 hdev->page_scan_type = rp->type;
694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695 struct sk_buff *skb)
697 u8 status = *((u8 *) skb->data);
698 u8 *type;
700 BT_DBG("%s status 0x%2.2x", hdev->name, status);
702 if (status)
703 return;
705 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 if (type)
707 hdev->page_scan_type = *type;
710 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
711 struct sk_buff *skb)
713 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
717 if (rp->status)
718 return;
720 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
721 hdev->block_len = __le16_to_cpu(rp->block_len);
722 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
724 hdev->block_cnt = hdev->num_blocks;
726 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
727 hdev->block_cnt, hdev->block_len);
730 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
732 struct hci_rp_read_clock *rp = (void *) skb->data;
733 struct hci_cp_read_clock *cp;
734 struct hci_conn *conn;
736 BT_DBG("%s", hdev->name);
738 if (skb->len < sizeof(*rp))
739 return;
741 if (rp->status)
742 return;
744 hci_dev_lock(hdev);
746 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
747 if (!cp)
748 goto unlock;
750 if (cp->which == 0x00) {
751 hdev->clock = le32_to_cpu(rp->clock);
752 goto unlock;
755 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
756 if (conn) {
757 conn->clock = le32_to_cpu(rp->clock);
758 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
761 unlock:
762 hci_dev_unlock(hdev);
765 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
766 struct sk_buff *skb)
768 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
770 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772 if (rp->status)
773 goto a2mp_rsp;
775 hdev->amp_status = rp->amp_status;
776 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
777 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
778 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
779 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
780 hdev->amp_type = rp->amp_type;
781 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
782 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
783 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
784 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
786 a2mp_rsp:
787 a2mp_send_getinfo_rsp(hdev);
790 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
791 struct sk_buff *skb)
793 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
794 struct amp_assoc *assoc = &hdev->loc_assoc;
795 size_t rem_len, frag_len;
797 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
799 if (rp->status)
800 goto a2mp_rsp;
802 frag_len = skb->len - sizeof(*rp);
803 rem_len = __le16_to_cpu(rp->rem_len);
805 if (rem_len > frag_len) {
806 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
808 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
809 assoc->offset += frag_len;
811 /* Read other fragments */
812 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
814 return;
817 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
818 assoc->len = assoc->offset + rem_len;
819 assoc->offset = 0;
821 a2mp_rsp:
822 /* Send A2MP Rsp when all fragments are received */
823 a2mp_send_getampassoc_rsp(hdev, rp->status);
824 a2mp_send_create_phy_link_req(hdev, rp->status);
827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828 struct sk_buff *skb)
830 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
832 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834 if (rp->status)
835 return;
837 hdev->inq_tx_power = rp->tx_power;
840 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
842 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
843 struct hci_cp_pin_code_reply *cp;
844 struct hci_conn *conn;
846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848 hci_dev_lock(hdev);
850 if (test_bit(HCI_MGMT, &hdev->dev_flags))
851 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
853 if (rp->status)
854 goto unlock;
856 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
857 if (!cp)
858 goto unlock;
860 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
861 if (conn)
862 conn->pin_length = cp->pin_len;
864 unlock:
865 hci_dev_unlock(hdev);
868 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
870 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
872 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
874 hci_dev_lock(hdev);
876 if (test_bit(HCI_MGMT, &hdev->dev_flags))
877 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
878 rp->status);
880 hci_dev_unlock(hdev);
883 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
884 struct sk_buff *skb)
886 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
888 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890 if (rp->status)
891 return;
893 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
894 hdev->le_pkts = rp->le_max_pkt;
896 hdev->le_cnt = hdev->le_pkts;
898 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902 struct sk_buff *skb)
904 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
908 if (rp->status)
909 return;
911 memcpy(hdev->le_features, rp->features, 8);
914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915 struct sk_buff *skb)
917 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
921 if (rp->status)
922 return;
924 hdev->adv_tx_power = rp->tx_power;
927 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
929 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
931 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
933 hci_dev_lock(hdev);
935 if (test_bit(HCI_MGMT, &hdev->dev_flags))
936 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
937 rp->status);
939 hci_dev_unlock(hdev);
942 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
943 struct sk_buff *skb)
945 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
949 hci_dev_lock(hdev);
951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
953 ACL_LINK, 0, rp->status);
955 hci_dev_unlock(hdev);
958 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
960 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
962 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 hci_dev_lock(hdev);
966 if (test_bit(HCI_MGMT, &hdev->dev_flags))
967 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
968 0, rp->status);
970 hci_dev_unlock(hdev);
973 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
974 struct sk_buff *skb)
976 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
978 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
980 hci_dev_lock(hdev);
982 if (test_bit(HCI_MGMT, &hdev->dev_flags))
983 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
984 ACL_LINK, 0, rp->status);
986 hci_dev_unlock(hdev);
989 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
990 struct sk_buff *skb)
992 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
994 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
996 hci_dev_lock(hdev);
997 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
998 rp->status);
999 hci_dev_unlock(hdev);
1002 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1003 struct sk_buff *skb)
1005 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1007 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1009 hci_dev_lock(hdev);
1010 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1011 rp->hash256, rp->rand256,
1012 rp->status);
1013 hci_dev_unlock(hdev);
1017 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1019 __u8 status = *((__u8 *) skb->data);
1020 bdaddr_t *sent;
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1024 if (status)
1025 return;
1027 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1028 if (!sent)
1029 return;
1031 hci_dev_lock(hdev);
1033 bacpy(&hdev->random_addr, sent);
1035 hci_dev_unlock(hdev);
1038 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1040 __u8 *sent, status = *((__u8 *) skb->data);
1042 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1044 if (status)
1045 return;
1047 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1048 if (!sent)
1049 return;
1051 hci_dev_lock(hdev);
1053 /* If we're doing connection initiation as peripheral. Set a
1054 * timeout in case something goes wrong.
1056 if (*sent) {
1057 struct hci_conn *conn;
1059 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1061 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1062 if (conn)
1063 queue_delayed_work(hdev->workqueue,
1064 &conn->le_conn_timeout,
1065 conn->conn_timeout);
1066 } else {
1067 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1070 hci_dev_unlock(hdev);
1073 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1075 struct hci_cp_le_set_scan_param *cp;
1076 __u8 status = *((__u8 *) skb->data);
1078 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1080 if (status)
1081 return;
1083 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1084 if (!cp)
1085 return;
1087 hci_dev_lock(hdev);
1089 hdev->le_scan_type = cp->type;
1091 hci_dev_unlock(hdev);
1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1096 struct discovery_state *d = &hdev->discovery;
1098 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1103 struct discovery_state *d = &hdev->discovery;
1105 bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 d->last_adv_data_len = 0;
1109 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110 u8 bdaddr_type, s8 rssi, u32 flags,
1111 u8 *data, u8 len)
1113 struct discovery_state *d = &hdev->discovery;
1115 bacpy(&d->last_adv_addr, bdaddr);
1116 d->last_adv_addr_type = bdaddr_type;
1117 d->last_adv_rssi = rssi;
1118 d->last_adv_flags = flags;
1119 memcpy(d->last_adv_data, data, len);
1120 d->last_adv_data_len = len;
1123 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1124 struct sk_buff *skb)
1126 struct hci_cp_le_set_scan_enable *cp;
1127 __u8 status = *((__u8 *) skb->data);
1129 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1131 if (status)
1132 return;
1134 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1135 if (!cp)
1136 return;
1138 switch (cp->enable) {
1139 case LE_SCAN_ENABLE:
1140 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1141 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1142 clear_pending_adv_report(hdev);
1143 break;
1145 case LE_SCAN_DISABLE:
1146 /* We do this here instead of when setting DISCOVERY_STOPPED
1147 * since the latter would potentially require waiting for
1148 * inquiry to stop too.
1150 if (has_pending_adv_report(hdev)) {
1151 struct discovery_state *d = &hdev->discovery;
1153 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1154 d->last_adv_addr_type, NULL,
1155 d->last_adv_rssi, d->last_adv_flags,
1156 d->last_adv_data,
1157 d->last_adv_data_len, NULL, 0);
1160 /* Cancel this timer so that we don't try to disable scanning
1161 * when it's already disabled.
1163 cancel_delayed_work(&hdev->le_scan_disable);
1165 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1167 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1168 * interrupted scanning due to a connect request. Mark
1169 * therefore discovery as stopped. If this was not
1170 * because of a connect request advertising might have
1171 * been disabled because of active scanning, so
1172 * re-enable it again if necessary.
1174 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1175 &hdev->dev_flags))
1176 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1177 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1178 hdev->discovery.state == DISCOVERY_FINDING)
1179 mgmt_reenable_advertising(hdev);
1181 break;
1183 default:
1184 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1185 break;
1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 struct sk_buff *skb)
1192 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1194 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1196 if (rp->status)
1197 return;
1199 hdev->le_white_list_size = rp->size;
1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 struct sk_buff *skb)
1205 __u8 status = *((__u8 *) skb->data);
1207 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1209 if (status)
1210 return;
1212 hci_bdaddr_list_clear(&hdev->le_white_list);
1215 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1216 struct sk_buff *skb)
1218 struct hci_cp_le_add_to_white_list *sent;
1219 __u8 status = *((__u8 *) skb->data);
1221 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1223 if (status)
1224 return;
1226 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1227 if (!sent)
1228 return;
1230 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1231 sent->bdaddr_type);
1234 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1237 struct hci_cp_le_del_from_white_list *sent;
1238 __u8 status = *((__u8 *) skb->data);
1240 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1242 if (status)
1243 return;
1245 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1246 if (!sent)
1247 return;
1249 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1250 sent->bdaddr_type);
1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 struct sk_buff *skb)
1256 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1258 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1260 if (rp->status)
1261 return;
1263 memcpy(hdev->le_states, rp->le_states, 8);
1266 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1267 struct sk_buff *skb)
1269 struct hci_cp_write_le_host_supported *sent;
1270 __u8 status = *((__u8 *) skb->data);
1272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1274 if (status)
1275 return;
1277 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1278 if (!sent)
1279 return;
1281 if (sent->le) {
1282 hdev->features[1][0] |= LMP_HOST_LE;
1283 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1284 } else {
1285 hdev->features[1][0] &= ~LMP_HOST_LE;
1286 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1287 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1290 if (sent->simul)
1291 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1292 else
1293 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1296 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1298 struct hci_cp_le_set_adv_param *cp;
1299 u8 status = *((u8 *) skb->data);
1301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303 if (status)
1304 return;
1306 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1307 if (!cp)
1308 return;
1310 hci_dev_lock(hdev);
1311 hdev->adv_addr_type = cp->own_address_type;
1312 hci_dev_unlock(hdev);
1315 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1318 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1320 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1321 hdev->name, rp->status, rp->phy_handle);
1323 if (rp->status)
1324 return;
1326 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1329 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1331 struct hci_rp_read_rssi *rp = (void *) skb->data;
1332 struct hci_conn *conn;
1334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1336 if (rp->status)
1337 return;
1339 hci_dev_lock(hdev);
1341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1342 if (conn)
1343 conn->rssi = rp->rssi;
1345 hci_dev_unlock(hdev);
1348 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1350 struct hci_cp_read_tx_power *sent;
1351 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1352 struct hci_conn *conn;
1354 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1356 if (rp->status)
1357 return;
1359 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1360 if (!sent)
1361 return;
1363 hci_dev_lock(hdev);
1365 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1366 if (!conn)
1367 goto unlock;
1369 switch (sent->type) {
1370 case 0x00:
1371 conn->tx_power = rp->tx_power;
1372 break;
1373 case 0x01:
1374 conn->max_tx_power = rp->tx_power;
1375 break;
1378 unlock:
1379 hci_dev_unlock(hdev);
1382 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386 if (status) {
1387 hci_conn_check_pending(hdev);
1388 return;
1391 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down (or retry) the pending connection object; on
 * success make sure a connection object exists so the later Connection
 * Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c ("Command Disallowed" per the HCI error
			 * table) is retried up to two attempts by parking the
			 * connection in BT_CONNECT2; anything else (or too
			 * many attempts) closes and deletes the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Command accepted but no object tracked yet (e.g.
			 * connection initiated outside the hash): create one.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1432 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1434 struct hci_cp_add_sco *cp;
1435 struct hci_conn *acl, *sco;
1436 __u16 handle;
1438 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1440 if (!status)
1441 return;
1443 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1444 if (!cp)
1445 return;
1447 handle = __le16_to_cpu(cp->handle);
1449 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1451 hci_dev_lock(hdev);
1453 acl = hci_conn_hash_lookup_handle(hdev, handle);
1454 if (acl) {
1455 sco = acl->link;
1456 if (sco) {
1457 sco->state = BT_CLOSED;
1459 hci_proto_connect_cfm(sco, status);
1460 hci_conn_del(sco);
1464 hci_dev_unlock(hdev);
1467 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1469 struct hci_cp_auth_requested *cp;
1470 struct hci_conn *conn;
1472 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1474 if (!status)
1475 return;
1477 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1478 if (!cp)
1479 return;
1481 hci_dev_lock(hdev);
1483 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1484 if (conn) {
1485 if (conn->state == BT_CONFIG) {
1486 hci_proto_connect_cfm(conn, status);
1487 hci_conn_drop(conn);
1491 hci_dev_unlock(hdev);
1494 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1496 struct hci_cp_set_conn_encrypt *cp;
1497 struct hci_conn *conn;
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1501 if (!status)
1502 return;
1504 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1505 if (!cp)
1506 return;
1508 hci_dev_lock(hdev);
1510 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1511 if (conn) {
1512 if (conn->state == BT_CONFIG) {
1513 hci_proto_connect_cfm(conn, status);
1514 hci_conn_drop(conn);
1518 hci_dev_unlock(hdev);
1521 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1522 struct hci_conn *conn)
1524 if (conn->state != BT_CONFIG || !conn->out)
1525 return 0;
1527 if (conn->pending_sec_level == BT_SECURITY_SDP)
1528 return 0;
1530 /* Only request authentication for SSP connections or non-SSP
1531 * devices with sec_level MEDIUM or HIGH or if MITM protection
1532 * is requested.
1534 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1535 conn->pending_sec_level != BT_SECURITY_FIPS &&
1536 conn->pending_sec_level != BT_SECURITY_HIGH &&
1537 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1538 return 0;
1540 return 1;
1543 static int hci_resolve_name(struct hci_dev *hdev,
1544 struct inquiry_entry *e)
1546 struct hci_cp_remote_name_req cp;
1548 memset(&cp, 0, sizeof(cp));
1550 bacpy(&cp.bdaddr, &e->data.bdaddr);
1551 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1552 cp.pscan_mode = e->data.pscan_mode;
1553 cp.clock_offset = e->data.clock_offset;
1555 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1558 static bool hci_resolve_next_name(struct hci_dev *hdev)
1560 struct discovery_state *discov = &hdev->discovery;
1561 struct inquiry_entry *e;
1563 if (list_empty(&discov->resolve))
1564 return false;
1566 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1567 if (!e)
1568 return false;
1570 if (hci_resolve_name(hdev, e) == 0) {
1571 e->name_state = NAME_PENDING;
1572 return true;
1575 return false;
/* Handle the outcome of a remote name lookup for bdaddr: report the name
 * (or its absence) to mgmt, advance the discovery state machine, and
 * either resolve the next pending name or finish discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Lookup failed; leave the entry marked as unresolvable. */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep discovery in the resolving state. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: notify the pending-name machinery and,
 * if the connection still needs authentication, request it now since no
 * Remote Name Request Complete event will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* mgmt-driven discovery tracks pending name lookups; report the
	 * failed one (NULL name) so discovery can move on.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1670 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1672 struct hci_cp_read_remote_features *cp;
1673 struct hci_conn *conn;
1675 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1677 if (!status)
1678 return;
1680 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1681 if (!cp)
1682 return;
1684 hci_dev_lock(hdev);
1686 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1687 if (conn) {
1688 if (conn->state == BT_CONFIG) {
1689 hci_proto_connect_cfm(conn, status);
1690 hci_conn_drop(conn);
1694 hci_dev_unlock(hdev);
1697 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1699 struct hci_cp_read_remote_ext_features *cp;
1700 struct hci_conn *conn;
1702 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1704 if (!status)
1705 return;
1707 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1708 if (!cp)
1709 return;
1711 hci_dev_lock(hdev);
1713 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1714 if (conn) {
1715 if (conn->state == BT_CONFIG) {
1716 hci_proto_connect_cfm(conn, status);
1717 hci_conn_drop(conn);
1721 hci_dev_unlock(hdev);
1724 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1726 struct hci_cp_setup_sync_conn *cp;
1727 struct hci_conn *acl, *sco;
1728 __u16 handle;
1730 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1732 if (!status)
1733 return;
1735 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1736 if (!cp)
1737 return;
1739 handle = __le16_to_cpu(cp->handle);
1741 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1743 hci_dev_lock(hdev);
1745 acl = hci_conn_hash_lookup_handle(hdev, handle);
1746 if (acl) {
1747 sco = acl->link;
1748 if (sco) {
1749 sco->state = BT_CLOSED;
1751 hci_proto_connect_cfm(sco, status);
1752 hci_conn_del(sco);
1756 hci_dev_unlock(hdev);
1759 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1761 struct hci_cp_sniff_mode *cp;
1762 struct hci_conn *conn;
1764 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1766 if (!status)
1767 return;
1769 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1770 if (!cp)
1771 return;
1773 hci_dev_lock(hdev);
1775 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1776 if (conn) {
1777 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1779 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1780 hci_sco_setup(conn, status);
1783 hci_dev_unlock(hdev);
1786 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1788 struct hci_cp_exit_sniff_mode *cp;
1789 struct hci_conn *conn;
1791 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1793 if (!status)
1794 return;
1796 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1797 if (!cp)
1798 return;
1800 hci_dev_lock(hdev);
1802 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1803 if (conn) {
1804 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1806 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1807 hci_sco_setup(conn, status);
1810 hci_dev_unlock(hdev);
1813 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1815 struct hci_cp_disconnect *cp;
1816 struct hci_conn *conn;
1818 if (!status)
1819 return;
1821 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1822 if (!cp)
1823 return;
1825 hci_dev_lock(hdev);
1827 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1828 if (conn)
1829 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1830 conn->dst_type, status);
1832 hci_dev_unlock(hdev);
1835 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1837 struct hci_cp_create_phy_link *cp;
1839 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1841 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1842 if (!cp)
1843 return;
1845 hci_dev_lock(hdev);
1847 if (status) {
1848 struct hci_conn *hcon;
1850 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1851 if (hcon)
1852 hci_conn_del(hcon);
1853 } else {
1854 amp_write_remote_assoc(hdev, cp->phy_handle);
1857 hci_dev_unlock(hdev);
1860 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1862 struct hci_cp_accept_phy_link *cp;
1864 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1866 if (status)
1867 return;
1869 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1870 if (!cp)
1871 return;
1873 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information needed
 * by SMP and arm a connection timeout for directed (non-white-list)
 * connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1927 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1929 struct hci_cp_le_start_enc *cp;
1930 struct hci_conn *conn;
1932 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1934 if (!status)
1935 return;
1937 hci_dev_lock(hdev);
1939 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1940 if (!cp)
1941 goto unlock;
1943 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1944 if (!conn)
1945 goto unlock;
1947 if (conn->state != BT_CONNECTED)
1948 goto unlock;
1950 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1951 hci_conn_drop(conn);
1953 unlock:
1954 hci_dev_unlock(hdev);
1957 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1959 struct hci_cp_switch_role *cp;
1960 struct hci_conn *conn;
1962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1964 if (!status)
1965 return;
1967 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1968 if (!cp)
1969 return;
1971 hci_dev_lock(hdev);
1973 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1974 if (conn)
1975 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1977 hci_dev_unlock(hdev);
/* Inquiry Complete event handler: clear the in-progress flag, wake any
 * waiters, and — under mgmt — either start resolving names of discovered
 * devices or stop discovery.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state machine only applies to mgmt-driven discovery. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start name resolution for the first device needing a name;
	 * if sending the request fails, finish discovery instead.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2021 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2023 struct inquiry_data data;
2024 struct inquiry_info *info = (void *) (skb->data + 1);
2025 int num_rsp = *((__u8 *) skb->data);
2027 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2029 if (!num_rsp)
2030 return;
2032 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2033 return;
2035 hci_dev_lock(hdev);
2037 for (; num_rsp; num_rsp--, info++) {
2038 u32 flags;
2040 bacpy(&data.bdaddr, &info->bdaddr);
2041 data.pscan_rep_mode = info->pscan_rep_mode;
2042 data.pscan_period_mode = info->pscan_period_mode;
2043 data.pscan_mode = info->pscan_mode;
2044 memcpy(data.dev_class, info->dev_class, 3);
2045 data.clock_offset = info->clock_offset;
2046 data.rssi = HCI_RSSI_INVALID;
2047 data.ssp_mode = 0x00;
2049 flags = hci_inquiry_cache_update(hdev, &data, false);
2051 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2052 info->dev_class, HCI_RSSI_INVALID,
2053 flags, NULL, 0, NULL, 0);
2056 hci_dev_unlock(hdev);
/* Connection Complete event handler: finish setting up the connection
 * object (state, timeouts, auth/encrypt flags, remote feature read) on
 * success, or tear it down and report the failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO request may have been tracked as eSCO; retry the
		 * lookup under ESCO_LINK and downgrade the type to match
		 * what the controller actually established.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) link with no stored key:
			 * give the pairing procedure a longer idle timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2143 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2145 struct hci_cp_reject_conn_req cp;
2147 bacpy(&cp.bdaddr, bdaddr);
2148 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2149 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler: apply link policy, blacklist and
 * connectable/whitelist filtering, then either accept the ACL/SCO
 * request, defer it, or reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the incoming connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Non-connectable devices only accept whitelisted peers. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default CVSD sync parameters; 0x1f40 = 8000 (bytes/s). */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol layer asked to defer: let it accept later. */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2242 static u8 hci_to_mgmt_reason(u8 err)
2244 switch (err) {
2245 case HCI_ERROR_CONNECTION_TIMEOUT:
2246 return MGMT_DEV_DISCONN_TIMEOUT;
2247 case HCI_ERROR_REMOTE_USER_TERM:
2248 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2249 case HCI_ERROR_REMOTE_POWER_OFF:
2250 return MGMT_DEV_DISCONN_REMOTE;
2251 case HCI_ERROR_LOCAL_HOST_TERM:
2252 return MGMT_DEV_DISCONN_LOCAL_HOST;
2253 default:
2254 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler: notify mgmt, clean up link keys
 * and scanning state, re-arm auto-connect parameters, delete the
 * connection, and re-enable LE advertising when appropriate.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if mgmt saw the connect. */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Devices configured for auto-connect go back on the pending
	 * LE connections list so background scanning reconnects them.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the type before hci_conn_del() frees conn. */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
/* Authentication Complete event handler: record the result, then either
 * continue connection setup with encryption (SSP), finish setup, or
 * deliver the auth result to waiters; finally run any pending
 * encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* A "successful" re-auth of a legacy (non-SSP) device does
		 * not actually prove anything, so don't raise security.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before setup completes. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the reference auth requesters held, keeping the
		 * standard disconnect idle timeout.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Remote Name Request Complete event handler: feed the resolved name (or
 * the failure) into the pending-name machinery, then request
 * authentication on the connection if it is still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Encryption Change event handler: update the connection's auth/encrypt
 * flags, enforce Secure Connections Only policy, and complete connection
 * setup or notify encryption waiters.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR;
			 * LE links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2515 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2516 struct sk_buff *skb)
2518 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2519 struct hci_conn *conn;
2521 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2523 hci_dev_lock(hdev);
2525 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2526 if (conn) {
2527 if (!ev->status)
2528 set_bit(HCI_CONN_SECURE, &conn->flags);
2530 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2532 hci_key_change_cfm(conn, ev->status);
2535 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event handler: cache the
 * remote features and continue connection setup — read extended features
 * for SSP peers, otherwise resolve the name and/or finish setup.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: fetch extended features page 1 before
	 * finishing setup; the rest continues in that event's handler.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Not yet reported to mgmt: request the remote name first so the
	 * connected event can carry it.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI Command Complete event.
 *
 * Dispatches to the per-opcode hci_cc_* handler, then completes any
 * pending request for this opcode and, if the controller advertises
 * free command slots (ev->ncmd) and we are not mid-reset, kicks the
 * command queue worker.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so hci_cc_* handlers see only the
	 * return parameters.
	 */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completes are flow-control only; no command timer runs */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept another command */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
/* Handle HCI Command Status event.
 *
 * Dispatches to the per-opcode hci_cs_* handler, then completes the
 * pending request only on failure or when the request is not waiting
 * for a later event (successful status for event-driven commands means
 * "in progress", not "done"). Finally restarts command-queue processing
 * if the controller has free command slots.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete now on error, or when the sent command does not expect
	 * a dedicated completion event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
/* Handle HCI Hardware Error event: controller reported an internal
 * failure. Only log it; recovery (if any) is left to user space or a
 * device reset.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
}
/* Handle HCI Role Change event: record the new master/slave role on
 * success, clear the pending role-switch flag and notify waiters.
 */
static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}
/* Handle HCI Number Of Completed Packets event (packet-based flow
 * control): credit back per-connection and per-device packet counters
 * for each reported handle, then kick the TX worker so queued traffic
 * can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the event actually carries num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Clamp the credits to the advertised buffer counts in
		 * case the controller over-reports completions.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Without a dedicated LE buffer pool, LE traffic
			 * shares the ACL buffers.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Resolve a handle to a connection depending on device type: BR/EDR
 * handles index connections directly, while on AMP controllers the
 * handle identifies a logical channel whose parent connection is
 * returned. Returns NULL when nothing matches.
 */
static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		break;
	}

	return NULL;
}
/* Handle HCI Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers): credit back data-block counters
 * per reported handle and kick the TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the event actually carries num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp in case the controller over-reports */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Handle HCI Mode Change event: track active/sniff mode transitions,
 * maintain the power-save flag for unsolicited changes, and complete
 * any SCO setup that was deferred until the ACL link became active.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only update power-save state for changes we did not
		 * request ourselves.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
/* Handle HCI PIN Code Request event. Extends the disconnect timeout for
 * the pairing, auto-rejects when we are non-bondable and did not
 * initiate authentication, otherwise forwards the request to user space
 * via mgmt (flagging whether a 16-digit secure PIN is required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* hold/drop pair just refreshes disc_timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a full 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type. Changed-combination
 * keys are ignored entirely (the real type is resolved when the key is
 * stored); unit and debug keys keep the current security level.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		/* Legacy pairing: only a 16-digit PIN counts as high */
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}
/* Handle HCI Link Key Request event: look up a stored key for the peer
 * and reply with it, unless the key is too weak for the security level
 * this connection requires (unauthenticated key with MITM requested, or
 * a short-PIN combination key for high/FIPS security) — in which case a
 * negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject unauthenticated keys when MITM protection was
		 * requested (auth_type bit 0), unless auth_type is still
		 * the unset sentinel 0xff.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
/* Handle HCI Link Key Notification event: store the new key on the
 * connection, persist it via the key store and notify user space.
 * Debug keys are dropped from the kernel list unless explicitly kept;
 * the persistent flag decides whether the key is flushed on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 here; the PIN length used during
	 * pairing is tracked on conn->pin_length instead.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* hold/drop pair refreshes the disconnect timeout */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI Read Clock Offset Complete event: cache the peer's clock
 * offset in the inquiry cache so later paging can use it.
 */
static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}
/* Handle HCI Connection Packet Type Changed event: record the newly
 * negotiated packet types on the connection.
 */
static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}
/* Handle HCI Page Scan Repetition Mode Change event: refresh the cached
 * page-scan repetition mode for the device in the inquiry cache.
 */
static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}
3404 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3405 struct sk_buff *skb)
3407 struct inquiry_data data;
3408 int num_rsp = *((__u8 *) skb->data);
3410 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3412 if (!num_rsp)
3413 return;
3415 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3416 return;
3418 hci_dev_lock(hdev);
3420 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3421 struct inquiry_info_with_rssi_and_pscan_mode *info;
3422 info = (void *) (skb->data + 1);
3424 for (; num_rsp; num_rsp--, info++) {
3425 u32 flags;
3427 bacpy(&data.bdaddr, &info->bdaddr);
3428 data.pscan_rep_mode = info->pscan_rep_mode;
3429 data.pscan_period_mode = info->pscan_period_mode;
3430 data.pscan_mode = info->pscan_mode;
3431 memcpy(data.dev_class, info->dev_class, 3);
3432 data.clock_offset = info->clock_offset;
3433 data.rssi = info->rssi;
3434 data.ssp_mode = 0x00;
3436 flags = hci_inquiry_cache_update(hdev, &data, false);
3438 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3439 info->dev_class, info->rssi,
3440 flags, NULL, 0, NULL, 0);
3442 } else {
3443 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3445 for (; num_rsp; num_rsp--, info++) {
3446 u32 flags;
3448 bacpy(&data.bdaddr, &info->bdaddr);
3449 data.pscan_rep_mode = info->pscan_rep_mode;
3450 data.pscan_period_mode = info->pscan_period_mode;
3451 data.pscan_mode = 0x00;
3452 memcpy(data.dev_class, info->dev_class, 3);
3453 data.clock_offset = info->clock_offset;
3454 data.rssi = info->rssi;
3455 data.ssp_mode = 0x00;
3457 flags = hci_inquiry_cache_update(hdev, &data, false);
3459 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3460 info->dev_class, info->rssi,
3461 flags, NULL, 0, NULL, 0);
3465 hci_dev_unlock(hdev);
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested features page and, for page 1 (host features),
 * mirrors the remote host's SSP/SC support into the connection flags.
 * While still in BT_CONFIG, continues setup like the basic remote
 * features handler: request the remote name or report the connection to
 * mgmt, and promote to BT_CONNECTED if no outgoing auth is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection goes BT_CONNECTED; for a set of known eSCO
 * negotiation failures an outgoing attempt is retried with a downgraded
 * (SCO-compatible) packet type before giving up. Any other failure
 * closes and deletes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* Controller fell back from eSCO to SCO; find the pending
		 * eSCO connection and retype it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			/* Retry succeeded; wait for the next complete event */
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3590 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3592 size_t parsed = 0;
3594 while (parsed < eir_len) {
3595 u8 field_len = eir[0];
3597 if (field_len == 0)
3598 return parsed;
3600 parsed += field_len + 1;
3601 eir += field_len + 1;
3604 return eir_len;
3607 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3608 struct sk_buff *skb)
3610 struct inquiry_data data;
3611 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3612 int num_rsp = *((__u8 *) skb->data);
3613 size_t eir_len;
3615 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3617 if (!num_rsp)
3618 return;
3620 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3621 return;
3623 hci_dev_lock(hdev);
3625 for (; num_rsp; num_rsp--, info++) {
3626 u32 flags;
3627 bool name_known;
3629 bacpy(&data.bdaddr, &info->bdaddr);
3630 data.pscan_rep_mode = info->pscan_rep_mode;
3631 data.pscan_period_mode = info->pscan_period_mode;
3632 data.pscan_mode = 0x00;
3633 memcpy(data.dev_class, info->dev_class, 3);
3634 data.clock_offset = info->clock_offset;
3635 data.rssi = info->rssi;
3636 data.ssp_mode = 0x01;
3638 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3639 name_known = eir_has_data_type(info->data,
3640 sizeof(info->data),
3641 EIR_NAME_COMPLETE);
3642 else
3643 name_known = true;
3645 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3647 eir_len = eir_get_length(info->data, sizeof(info->data));
3649 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3650 info->dev_class, info->rssi,
3651 flags, info->data, eir_len, NULL, 0);
3654 hci_dev_unlock(hdev);
/* Handle HCI Encryption Key Refresh Complete event. Only LE links are
 * processed here (BR/EDR takes the auth_complete path): on success the
 * negotiated security level becomes effective; on failure while
 * connected the link is torn down with an auth-failure reason.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold/drop pair refreshes the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Derive the authentication requirements to send in an IO Capability
 * Reply, combining the remote's stated requirements with our own MITM
 * preference (bit 0 of auth_type). MITM is only requested when both
 * sides have IO capabilities that can actually support it.
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}
/* Handle HCI IO Capability Request event (SSP pairing). If pairing is
 * permitted (we are bondable, we initiated, or the remote wants
 * no-bonding) reply with our IO capability, computed authentication
 * requirements and OOB availability; otherwise send a negative reply
 * rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI IO Capability Response event: record the remote side's IO
 * capability, authentication requirements and OOB-data availability for
 * use by the later user-confirmation / passkey handling.
 */
static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;
	if (ev->oob_data)
		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI User Confirmation Request event (SSP numeric comparison).
 *
 * Rejects the request when we need MITM protection the remote cannot
 * provide; auto-accepts (optionally after a configurable delay) when
 * neither side requires MITM; otherwise forwards the passkey to user
 * space for confirmation, with confirm_hint=1 when we are merely
 * authorizing as acceptor rather than comparing digits.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3896 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3897 struct sk_buff *skb)
3899 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3901 BT_DBG("%s", hdev->name);
3903 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3904 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3907 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3908 struct sk_buff *skb)
3910 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3911 struct hci_conn *conn;
3913 BT_DBG("%s", hdev->name);
3915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3916 if (!conn)
3917 return;
3919 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3920 conn->passkey_entered = 0;
3922 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3923 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3924 conn->dst_type, conn->passkey_notify,
3925 conn->passkey_entered);
3928 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3930 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3931 struct hci_conn *conn;
3933 BT_DBG("%s", hdev->name);
3935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3936 if (!conn)
3937 return;
3939 switch (ev->type) {
3940 case HCI_KEYPRESS_STARTED:
3941 conn->passkey_entered = 0;
3942 return;
3944 case HCI_KEYPRESS_ENTERED:
3945 conn->passkey_entered++;
3946 break;
3948 case HCI_KEYPRESS_ERASED:
3949 conn->passkey_entered--;
3950 break;
3952 case HCI_KEYPRESS_CLEARED:
3953 conn->passkey_entered = 0;
3954 break;
3956 case HCI_KEYPRESS_COMPLETED:
3957 return;
3960 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3961 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3962 conn->dst_type, conn->passkey_notify,
3963 conn->passkey_entered);
3966 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3967 struct sk_buff *skb)
3969 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3970 struct hci_conn *conn;
3972 BT_DBG("%s", hdev->name);
3974 hci_dev_lock(hdev);
3976 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3977 if (!conn)
3978 goto unlock;
3980 /* Reset the authentication requirement to unknown */
3981 conn->remote_auth = 0xff;
3983 /* To avoid duplicate auth_failed events to user space we check
3984 * the HCI_CONN_AUTH_PEND flag which will be set if we
3985 * initiated the authentication. A traditional auth_complete
3986 * event gets always produced as initiator and is also mapped to
3987 * the mgmt_auth_failed event */
3988 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3989 mgmt_auth_failed(conn, ev->status);
3991 hci_conn_drop(conn);
3993 unlock:
3994 hci_dev_unlock(hdev);
3997 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3998 struct sk_buff *skb)
4000 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4001 struct inquiry_entry *ie;
4002 struct hci_conn *conn;
4004 BT_DBG("%s", hdev->name);
4006 hci_dev_lock(hdev);
4008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4009 if (conn)
4010 memcpy(conn->features[1], ev->features, 8);
4012 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4013 if (ie)
4014 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4016 hci_dev_unlock(hdev);
4019 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4020 struct sk_buff *skb)
4022 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4023 struct oob_data *data;
4025 BT_DBG("%s", hdev->name);
4027 hci_dev_lock(hdev);
4029 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4030 goto unlock;
4032 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4033 if (data) {
4034 if (bredr_sc_enabled(hdev)) {
4035 struct hci_cp_remote_oob_ext_data_reply cp;
4037 bacpy(&cp.bdaddr, &ev->bdaddr);
4038 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4039 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4040 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4041 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4043 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4044 sizeof(cp), &cp);
4045 } else {
4046 struct hci_cp_remote_oob_data_reply cp;
4048 bacpy(&cp.bdaddr, &ev->bdaddr);
4049 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4050 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4052 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4053 sizeof(cp), &cp);
4055 } else {
4056 struct hci_cp_remote_oob_data_neg_reply cp;
4058 bacpy(&cp.bdaddr, &ev->bdaddr);
4059 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4060 sizeof(cp), &cp);
4063 unlock:
4064 hci_dev_unlock(hdev);
4067 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4068 struct sk_buff *skb)
4070 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4071 struct hci_conn *hcon, *bredr_hcon;
4073 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4074 ev->status);
4076 hci_dev_lock(hdev);
4078 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4079 if (!hcon) {
4080 hci_dev_unlock(hdev);
4081 return;
4084 if (ev->status) {
4085 hci_conn_del(hcon);
4086 hci_dev_unlock(hdev);
4087 return;
4090 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4092 hcon->state = BT_CONNECTED;
4093 bacpy(&hcon->dst, &bredr_hcon->dst);
4095 hci_conn_hold(hcon);
4096 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4097 hci_conn_drop(hcon);
4099 hci_conn_add_sysfs(hcon);
4101 amp_physical_cfm(bredr_hcon, hcon);
4103 hci_dev_unlock(hdev);
/* HCI Logical Link Complete event (AMP).
 *
 * Creates the hci_chan for the new AMP logical link and, if an L2CAP
 * channel is waiting on the AMP manager, confirms logical link setup
 * so the channel move can proceed.
 *
 * NOTE(review): unlike the neighbouring event handlers, this one does
 * not take hci_dev_lock() around the connection-hash lookup — verify
 * whether that is intentional or an oversight.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	/* The logical link rides on top of a physical link; the event
	 * references the latter via phy_handle.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block MTU. */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		/* Keep a reference for the lifetime of the logical link. */
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4144 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4145 struct sk_buff *skb)
4147 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4148 struct hci_chan *hchan;
4150 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4151 le16_to_cpu(ev->handle), ev->status);
4153 if (ev->status)
4154 return;
4156 hci_dev_lock(hdev);
4158 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4159 if (!hchan)
4160 goto unlock;
4162 amp_destroy_logical_link(hchan, ev->reason);
4164 unlock:
4165 hci_dev_unlock(hdev);
4168 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4169 struct sk_buff *skb)
4171 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4172 struct hci_conn *hcon;
4174 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4176 if (ev->status)
4177 return;
4179 hci_dev_lock(hdev);
4181 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4182 if (hcon) {
4183 hcon->state = BT_CLOSED;
4184 hci_conn_del(hcon);
4187 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event.
 *
 * Matches the event to a pending hci_conn (or creates one for
 * white-list initiated connections), fills in the initiator/responder
 * address bookkeeping, resolves RPAs back to identity addresses and
 * finally moves the connection to BT_CONNECTED, notifying mgmt and the
 * upper protocol layers.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt succeeded or failed before the
		 * timeout fired; either way, stop the timer.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending-connection params have served their purpose;
	 * drop the extra reference they held on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4325 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4326 struct sk_buff *skb)
4328 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4329 struct hci_conn *conn;
4331 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4333 if (ev->status)
4334 return;
4336 hci_dev_lock(hdev);
4338 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4339 if (conn) {
4340 conn->le_conn_interval = le16_to_cpu(ev->interval);
4341 conn->le_conn_latency = le16_to_cpu(ev->latency);
4342 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4345 hci_dev_unlock(hdev);
/* This function requires the caller holds hdev->lock */
/* Decide, based on a received advertising report, whether we should
 * initiate an LE connection to @addr.
 *
 * Returns the new hci_conn when a connection attempt was started, or
 * NULL when no attempt is made (non-connectable advertising, blocked
 * device, no matching pending params, or an attempt already in
 * progress).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connections from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
/* Process a single LE advertising (or direct advertising) report.
 *
 * Resolves RPAs to identity addresses, triggers pending auto-connect
 * attempts, and generates mgmt device-found events. ADV_IND and
 * ADV_SCAN_IND reports are cached so that a following SCAN_RSP from
 * the same device can be merged into one device-found event.
 *
 * @direct_addr is non-NULL only for LE Direct Advertising Reports.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4587 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4589 u8 num_reports = skb->data[0];
4590 void *ptr = &skb->data[1];
4592 hci_dev_lock(hdev);
4594 while (num_reports--) {
4595 struct hci_ev_le_advertising_info *ev = ptr;
4596 s8 rssi;
4598 rssi = ev->data[ev->length];
4599 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4600 ev->bdaddr_type, NULL, 0, rssi,
4601 ev->data, ev->length);
4603 ptr += sizeof(*ev) + ev->length + 1;
4606 hci_dev_unlock(hdev);
/* HCI LE Long Term Key Request event.
 *
 * The controller asks for the LTK to encrypt the link. Look the key up
 * in the SMP key store and reply with it, or send a negative reply if
 * no matching key exists (or the EDiv/Rand values don't match).
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use; drop them once consumed. */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4672 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4673 u8 reason)
4675 struct hci_cp_le_conn_param_req_neg_reply cp;
4677 cp.handle = cpu_to_le16(handle);
4678 cp.reason = reason;
4680 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4681 &cp);
4684 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4685 struct sk_buff *skb)
4687 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4688 struct hci_cp_le_conn_param_req_reply cp;
4689 struct hci_conn *hcon;
4690 u16 handle, min, max, latency, timeout;
4692 handle = le16_to_cpu(ev->handle);
4693 min = le16_to_cpu(ev->interval_min);
4694 max = le16_to_cpu(ev->interval_max);
4695 latency = le16_to_cpu(ev->latency);
4696 timeout = le16_to_cpu(ev->timeout);
4698 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4699 if (!hcon || hcon->state != BT_CONNECTED)
4700 return send_conn_param_neg_reply(hdev, handle,
4701 HCI_ERROR_UNKNOWN_CONN_ID);
4703 if (hci_check_conn_params(min, max, latency, timeout))
4704 return send_conn_param_neg_reply(hdev, handle,
4705 HCI_ERROR_INVALID_LL_PARAMS);
4707 if (hcon->role == HCI_ROLE_MASTER) {
4708 struct hci_conn_params *params;
4709 u8 store_hint;
4711 hci_dev_lock(hdev);
4713 params = hci_conn_params_lookup(hdev, &hcon->dst,
4714 hcon->dst_type);
4715 if (params) {
4716 params->conn_min_interval = min;
4717 params->conn_max_interval = max;
4718 params->conn_latency = latency;
4719 params->supervision_timeout = timeout;
4720 store_hint = 0x01;
4721 } else{
4722 store_hint = 0x00;
4725 hci_dev_unlock(hdev);
4727 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4728 store_hint, min, max, latency, timeout);
4731 cp.handle = ev->handle;
4732 cp.interval_min = ev->interval_min;
4733 cp.interval_max = ev->interval_max;
4734 cp.latency = ev->latency;
4735 cp.timeout = ev->timeout;
4736 cp.min_ce_len = 0;
4737 cp.max_ce_len = 0;
4739 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4742 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4743 struct sk_buff *skb)
4745 u8 num_reports = skb->data[0];
4746 void *ptr = &skb->data[1];
4748 hci_dev_lock(hdev);
4750 while (num_reports--) {
4751 struct hci_ev_le_direct_adv_info *ev = ptr;
4753 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4754 ev->bdaddr_type, &ev->direct_addr,
4755 ev->direct_addr_type, ev->rssi, NULL, 0);
4757 ptr += sizeof(*ev);
4760 hci_dev_unlock(hdev);
4763 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4765 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4767 skb_pull(skb, sizeof(*le_ev));
4769 switch (le_ev->subevent) {
4770 case HCI_EV_LE_CONN_COMPLETE:
4771 hci_le_conn_complete_evt(hdev, skb);
4772 break;
4774 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4775 hci_le_conn_update_complete_evt(hdev, skb);
4776 break;
4778 case HCI_EV_LE_ADVERTISING_REPORT:
4779 hci_le_adv_report_evt(hdev, skb);
4780 break;
4782 case HCI_EV_LE_LTK_REQ:
4783 hci_le_ltk_request_evt(hdev, skb);
4784 break;
4786 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4787 hci_le_remote_conn_param_req_evt(hdev, skb);
4788 break;
4790 case HCI_EV_LE_DIRECT_ADV_REPORT:
4791 hci_le_direct_adv_report_evt(hdev, skb);
4792 break;
4794 default:
4795 break;
4799 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4801 struct hci_ev_channel_selected *ev = (void *) skb->data;
4802 struct hci_conn *hcon;
4804 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4806 skb_pull(skb, sizeof(*ev));
4808 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4809 if (!hcon)
4810 return;
4812 amp_read_loc_assoc_final_data(hdev, hcon);
/* Top-level HCI event demultiplexer.
 *
 * Called for every HCI event packet received from the controller.
 * Stashes a copy for any pending synchronous request, completes the
 * matching sent command if one is waiting on this event, and then
 * dispatches to the per-event handler. Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for exactly this event,
	 * complete the request now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and dropped. */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}