[linux/fpc-iii.git] / net / bluetooth / hci_event.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "a2mp.h"
#include "amp.h"

/* Handle HCI Event packets */
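/*
 * Layout of this file: the hci_cc_* functions handle Command Complete
 * results for individual opcodes, the hci_cs_* functions handle Command
 * Status results, and the hci_*_evt functions further down handle
 * asynchronous HCI events. hci_cmd_complete_evt() and hci_cmd_status_evt()
 * dispatch to the right handler based on the opcode of the command that
 * finished.
 */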
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40 __u8 status = *((__u8 *) skb->data);
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
44 if (status)
45 return;
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
51 hci_dev_lock(hdev);
52 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
53 hci_dev_unlock(hdev);
55 hci_conn_check_pending(hdev);
58 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 __u8 status = *((__u8 *) skb->data);
62 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64 if (status)
65 return;
67 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
70 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 __u8 status = *((__u8 *) skb->data);
74 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76 if (status)
77 return;
79 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81 hci_conn_check_pending(hdev);
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
85 struct sk_buff *skb)
87 BT_DBG("%s", hdev->name);
90 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 struct hci_rp_role_discovery *rp = (void *) skb->data;
93 struct hci_conn *conn;
95 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97 if (rp->status)
98 return;
100 hci_dev_lock(hdev);
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) {
104 if (rp->role)
105 conn->link_mode &= ~HCI_LM_MASTER;
106 else
107 conn->link_mode |= HCI_LM_MASTER;
110 hci_dev_unlock(hdev);
113 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 struct hci_rp_read_link_policy *rp = (void *) skb->data;
116 struct hci_conn *conn;
118 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120 if (rp->status)
121 return;
123 hci_dev_lock(hdev);
125 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 if (conn)
127 conn->link_policy = __le16_to_cpu(rp->policy);
129 hci_dev_unlock(hdev);
132 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 struct hci_rp_write_link_policy *rp = (void *) skb->data;
135 struct hci_conn *conn;
136 void *sent;
138 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140 if (rp->status)
141 return;
143 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
144 if (!sent)
145 return;
147 hci_dev_lock(hdev);
149 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 if (conn)
151 conn->link_policy = get_unaligned_le16(sent + 2);
153 hci_dev_unlock(hdev);
156 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
157 struct sk_buff *skb)
159 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163 if (rp->status)
164 return;
166 hdev->link_policy = __le16_to_cpu(rp->policy);
169 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
170 struct sk_buff *skb)
172 __u8 status = *((__u8 *) skb->data);
173 void *sent;
175 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent)
179 return;
181 if (!status)
182 hdev->link_policy = get_unaligned_le16(sent);
185 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
187 __u8 status = *((__u8 *) skb->data);
189 BT_DBG("%s status 0x%2.2x", hdev->name, status);
191 clear_bit(HCI_RESET, &hdev->flags);
193 /* Reset all non-persistent flags */
194 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
196 hdev->discovery.state = DISCOVERY_STOPPED;
197 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
198 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
200 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
201 hdev->adv_data_len = 0;
203 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
204 hdev->scan_rsp_data_len = 0;
206 hdev->ssp_debug_mode = 0;
209 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
211 __u8 status = *((__u8 *) skb->data);
212 void *sent;
214 BT_DBG("%s status 0x%2.2x", hdev->name, status);
216 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
217 if (!sent)
218 return;
220 hci_dev_lock(hdev);
222 if (test_bit(HCI_MGMT, &hdev->dev_flags))
223 mgmt_set_local_name_complete(hdev, sent, status);
224 else if (!status)
225 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
227 hci_dev_unlock(hdev);
230 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232 struct hci_rp_read_local_name *rp = (void *) skb->data;
234 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236 if (rp->status)
237 return;
239 if (test_bit(HCI_SETUP, &hdev->dev_flags))
240 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
243 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
245 __u8 status = *((__u8 *) skb->data);
246 void *sent;
248 BT_DBG("%s status 0x%2.2x", hdev->name, status);
250 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
251 if (!sent)
252 return;
254 if (!status) {
255 __u8 param = *((__u8 *) sent);
257 if (param == AUTH_ENABLED)
258 set_bit(HCI_AUTH, &hdev->flags);
259 else
260 clear_bit(HCI_AUTH, &hdev->flags);
263 if (test_bit(HCI_MGMT, &hdev->dev_flags))
264 mgmt_auth_enable_complete(hdev, status);
267 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269 __u8 status = *((__u8 *) skb->data);
270 void *sent;
272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
274 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
275 if (!sent)
276 return;
278 if (!status) {
279 __u8 param = *((__u8 *) sent);
281 if (param)
282 set_bit(HCI_ENCRYPT, &hdev->flags);
283 else
284 clear_bit(HCI_ENCRYPT, &hdev->flags);
288 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
290 __u8 param, status = *((__u8 *) skb->data);
291 int old_pscan, old_iscan;
292 void *sent;
294 BT_DBG("%s status 0x%2.2x", hdev->name, status);
296 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
297 if (!sent)
298 return;
300 param = *((__u8 *) sent);
302 hci_dev_lock(hdev);
304 if (status) {
305 mgmt_write_scan_failed(hdev, param, status);
306 hdev->discov_timeout = 0;
307 goto done;
	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
313 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
315 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
316 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
318 if (param & SCAN_INQUIRY) {
319 set_bit(HCI_ISCAN, &hdev->flags);
320 if (!old_iscan)
321 mgmt_discoverable(hdev, 1);
322 } else if (old_iscan)
323 mgmt_discoverable(hdev, 0);
325 if (param & SCAN_PAGE) {
326 set_bit(HCI_PSCAN, &hdev->flags);
327 if (!old_pscan)
328 mgmt_connectable(hdev, 1);
329 } else if (old_pscan)
330 mgmt_connectable(hdev, 0);
332 done:
333 hci_dev_unlock(hdev);
336 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
338 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
340 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
342 if (rp->status)
343 return;
345 memcpy(hdev->dev_class, rp->dev_class, 3);
347 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
348 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
351 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
353 __u8 status = *((__u8 *) skb->data);
354 void *sent;
356 BT_DBG("%s status 0x%2.2x", hdev->name, status);
358 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
359 if (!sent)
360 return;
362 hci_dev_lock(hdev);
364 if (status == 0)
365 memcpy(hdev->dev_class, sent, 3);
367 if (test_bit(HCI_MGMT, &hdev->dev_flags))
368 mgmt_set_class_of_dev_complete(hdev, sent, status);
370 hci_dev_unlock(hdev);
373 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
375 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 __u16 setting;
378 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
380 if (rp->status)
381 return;
383 setting = __le16_to_cpu(rp->voice_setting);
385 if (hdev->voice_setting == setting)
386 return;
388 hdev->voice_setting = setting;
390 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
392 if (hdev->notify)
393 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
396 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
397 struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
400 __u16 setting;
401 void *sent;
403 BT_DBG("%s status 0x%2.2x", hdev->name, status);
405 if (status)
406 return;
408 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
409 if (!sent)
410 return;
412 setting = get_unaligned_le16(sent);
414 if (hdev->voice_setting == setting)
415 return;
417 hdev->voice_setting = setting;
419 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
421 if (hdev->notify)
422 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
425 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
426 struct sk_buff *skb)
428 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
430 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
432 if (rp->status)
433 return;
435 hdev->num_iac = rp->num_iac;
437 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
440 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
442 __u8 status = *((__u8 *) skb->data);
443 struct hci_cp_write_ssp_mode *sent;
445 BT_DBG("%s status 0x%2.2x", hdev->name, status);
447 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
448 if (!sent)
449 return;
451 if (!status) {
452 if (sent->mode)
453 hdev->features[1][0] |= LMP_HOST_SSP;
454 else
455 hdev->features[1][0] &= ~LMP_HOST_SSP;
458 if (test_bit(HCI_MGMT, &hdev->dev_flags))
459 mgmt_ssp_enable_complete(hdev, sent->mode, status);
460 else if (!status) {
461 if (sent->mode)
462 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
463 else
464 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
468 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
470 struct hci_rp_read_local_version *rp = (void *) skb->data;
472 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
474 if (rp->status)
475 return;
477 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
478 hdev->hci_ver = rp->hci_ver;
479 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
480 hdev->lmp_ver = rp->lmp_ver;
481 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
482 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
486 static void hci_cc_read_local_commands(struct hci_dev *hdev,
487 struct sk_buff *skb)
489 struct hci_rp_read_local_commands *rp = (void *) skb->data;
491 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
493 if (rp->status)
494 return;
496 if (test_bit(HCI_SETUP, &hdev->dev_flags))
497 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
500 static void hci_cc_read_local_features(struct hci_dev *hdev,
501 struct sk_buff *skb)
503 struct hci_rp_read_local_features *rp = (void *) skb->data;
505 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
507 if (rp->status)
508 return;
510 memcpy(hdev->features, rp->features, 8);
512 /* Adjust default settings according to features
513 * supported by device. */
515 if (hdev->features[0][0] & LMP_3SLOT)
516 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
518 if (hdev->features[0][0] & LMP_5SLOT)
519 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
521 if (hdev->features[0][1] & LMP_HV2) {
522 hdev->pkt_type |= (HCI_HV2);
523 hdev->esco_type |= (ESCO_HV2);
526 if (hdev->features[0][1] & LMP_HV3) {
527 hdev->pkt_type |= (HCI_HV3);
528 hdev->esco_type |= (ESCO_HV3);
531 if (lmp_esco_capable(hdev))
532 hdev->esco_type |= (ESCO_EV3);
534 if (hdev->features[0][4] & LMP_EV4)
535 hdev->esco_type |= (ESCO_EV4);
537 if (hdev->features[0][4] & LMP_EV5)
538 hdev->esco_type |= (ESCO_EV5);
540 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
541 hdev->esco_type |= (ESCO_2EV3);
543 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
544 hdev->esco_type |= (ESCO_3EV3);
546 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
547 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
550 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
551 struct sk_buff *skb)
553 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
555 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557 if (rp->status)
558 return;
560 if (hdev->max_page < rp->max_page)
561 hdev->max_page = rp->max_page;
563 if (rp->page < HCI_MAX_PAGES)
564 memcpy(hdev->features[rp->page], rp->features, 8);
567 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
568 struct sk_buff *skb)
570 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
574 if (!rp->status)
575 hdev->flow_ctl_mode = rp->mode;
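/* Cache the controller's ACL/SCO MTUs and packet counts reported by
 * Read Buffer Size; acl_cnt/sco_cnt start out equal to the packet counts
 * and act as the flow-control credits used when queueing outgoing data.
 */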
578 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
580 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
582 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
584 if (rp->status)
585 return;
587 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
588 hdev->sco_mtu = rp->sco_mtu;
589 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
590 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
592 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
593 hdev->sco_mtu = 64;
594 hdev->sco_pkts = 8;
597 hdev->acl_cnt = hdev->acl_pkts;
598 hdev->sco_cnt = hdev->sco_pkts;
600 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
601 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
604 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610 if (!rp->status)
611 bacpy(&hdev->bdaddr, &rp->bdaddr);
614 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
615 struct sk_buff *skb)
617 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
619 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
621 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
622 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
623 hdev->page_scan_window = __le16_to_cpu(rp->window);
627 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
628 struct sk_buff *skb)
630 u8 status = *((u8 *) skb->data);
631 struct hci_cp_write_page_scan_activity *sent;
633 BT_DBG("%s status 0x%2.2x", hdev->name, status);
635 if (status)
636 return;
638 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
639 if (!sent)
640 return;
642 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
643 hdev->page_scan_window = __le16_to_cpu(sent->window);
646 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
647 struct sk_buff *skb)
649 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
651 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
653 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
654 hdev->page_scan_type = rp->type;
657 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
658 struct sk_buff *skb)
660 u8 status = *((u8 *) skb->data);
661 u8 *type;
663 BT_DBG("%s status 0x%2.2x", hdev->name, status);
665 if (status)
666 return;
668 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
669 if (type)
670 hdev->page_scan_type = *type;
673 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
674 struct sk_buff *skb)
676 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
678 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
680 if (rp->status)
681 return;
683 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
684 hdev->block_len = __le16_to_cpu(rp->block_len);
685 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
687 hdev->block_cnt = hdev->num_blocks;
689 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
690 hdev->block_cnt, hdev->block_len);
693 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
694 struct sk_buff *skb)
696 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
698 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
700 if (rp->status)
701 goto a2mp_rsp;
703 hdev->amp_status = rp->amp_status;
704 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
705 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
706 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
707 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
708 hdev->amp_type = rp->amp_type;
709 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
710 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
711 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
712 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
714 a2mp_rsp:
715 a2mp_send_getinfo_rsp(hdev);
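/* The local AMP assoc data may not fit in a single Command Complete event,
 * so it arrives in fragments; accumulate them in hdev->loc_assoc and keep
 * requesting more until the remaining length fits in the current fragment.
 */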
718 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
719 struct sk_buff *skb)
721 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
722 struct amp_assoc *assoc = &hdev->loc_assoc;
723 size_t rem_len, frag_len;
725 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
727 if (rp->status)
728 goto a2mp_rsp;
730 frag_len = skb->len - sizeof(*rp);
731 rem_len = __le16_to_cpu(rp->rem_len);
733 if (rem_len > frag_len) {
734 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
736 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
737 assoc->offset += frag_len;
739 /* Read other fragments */
740 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
742 return;
745 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
746 assoc->len = assoc->offset + rem_len;
747 assoc->offset = 0;
749 a2mp_rsp:
750 /* Send A2MP Rsp when all fragments are received */
751 a2mp_send_getampassoc_rsp(hdev, rp->status);
752 a2mp_send_create_phy_link_req(hdev, rp->status);
755 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
756 struct sk_buff *skb)
758 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
760 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
762 if (!rp->status)
763 hdev->inq_tx_power = rp->tx_power;
766 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
768 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
769 struct hci_cp_pin_code_reply *cp;
770 struct hci_conn *conn;
772 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
774 hci_dev_lock(hdev);
776 if (test_bit(HCI_MGMT, &hdev->dev_flags))
777 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
779 if (rp->status)
780 goto unlock;
782 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
783 if (!cp)
784 goto unlock;
786 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
787 if (conn)
788 conn->pin_length = cp->pin_len;
790 unlock:
791 hci_dev_unlock(hdev);
794 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
796 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
800 hci_dev_lock(hdev);
802 if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
804 rp->status);
806 hci_dev_unlock(hdev);
809 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
810 struct sk_buff *skb)
812 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
816 if (rp->status)
817 return;
819 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
820 hdev->le_pkts = rp->le_max_pkt;
822 hdev->le_cnt = hdev->le_pkts;
824 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
827 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
828 struct sk_buff *skb)
830 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
832 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834 if (!rp->status)
835 memcpy(hdev->le_features, rp->features, 8);
838 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
839 struct sk_buff *skb)
841 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 if (!rp->status)
846 hdev->adv_tx_power = rp->tx_power;
849 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
851 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
853 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
855 hci_dev_lock(hdev);
857 if (test_bit(HCI_MGMT, &hdev->dev_flags))
858 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
859 rp->status);
861 hci_dev_unlock(hdev);
864 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
865 struct sk_buff *skb)
867 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
871 hci_dev_lock(hdev);
873 if (test_bit(HCI_MGMT, &hdev->dev_flags))
874 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
875 ACL_LINK, 0, rp->status);
877 hci_dev_unlock(hdev);
880 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
882 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
884 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
886 hci_dev_lock(hdev);
888 if (test_bit(HCI_MGMT, &hdev->dev_flags))
889 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
890 0, rp->status);
892 hci_dev_unlock(hdev);
895 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
896 struct sk_buff *skb)
898 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
900 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
902 hci_dev_lock(hdev);
904 if (test_bit(HCI_MGMT, &hdev->dev_flags))
905 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
906 ACL_LINK, 0, rp->status);
908 hci_dev_unlock(hdev);
911 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
912 struct sk_buff *skb)
914 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
916 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
918 hci_dev_lock(hdev);
919 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
920 rp->randomizer, rp->status);
921 hci_dev_unlock(hdev);
924 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
926 __u8 *sent, status = *((__u8 *) skb->data);
928 BT_DBG("%s status 0x%2.2x", hdev->name, status);
930 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
931 if (!sent)
932 return;
934 hci_dev_lock(hdev);
936 if (!status) {
937 if (*sent)
938 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
939 else
940 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
943 hci_dev_unlock(hdev);
946 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
947 struct sk_buff *skb)
949 struct hci_cp_le_set_scan_enable *cp;
950 __u8 status = *((__u8 *) skb->data);
952 BT_DBG("%s status 0x%2.2x", hdev->name, status);
954 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
955 if (!cp)
956 return;
958 if (status)
959 return;
961 switch (cp->enable) {
962 case LE_SCAN_ENABLE:
963 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
964 break;
966 case LE_SCAN_DISABLE:
967 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
968 break;
970 default:
971 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
972 break;
976 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
977 struct sk_buff *skb)
979 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
983 if (!rp->status)
984 hdev->le_white_list_size = rp->size;
987 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
988 struct sk_buff *skb)
990 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
992 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
994 if (!rp->status)
995 memcpy(hdev->le_states, rp->le_states, 8);
998 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
999 struct sk_buff *skb)
1001 struct hci_cp_write_le_host_supported *sent;
1002 __u8 status = *((__u8 *) skb->data);
1004 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1006 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1007 if (!sent)
1008 return;
1010 if (!status) {
1011 if (sent->le) {
1012 hdev->features[1][0] |= LMP_HOST_LE;
1013 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1014 } else {
1015 hdev->features[1][0] &= ~LMP_HOST_LE;
1016 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1017 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1020 if (sent->simul)
1021 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1022 else
1023 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1027 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1028 struct sk_buff *skb)
1030 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1032 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1033 hdev->name, rp->status, rp->phy_handle);
1035 if (rp->status)
1036 return;
1038 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1041 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1043 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1045 if (status) {
1046 hci_conn_check_pending(hdev);
1047 return;
1050 set_bit(HCI_INQUIRY, &hdev->flags);
1053 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1055 struct hci_cp_create_conn *cp;
1056 struct hci_conn *conn;
1058 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1060 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1061 if (!cp)
1062 return;
1064 hci_dev_lock(hdev);
1066 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1068 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
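	/* On failure, give up unless the error was 0x0c (Command Disallowed)
	 * and we still have connection attempts left, in which case the
	 * connection is kept in BT_CONNECT2 so it can be retried.
	 */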
1070 if (status) {
1071 if (conn && conn->state == BT_CONNECT) {
1072 if (status != 0x0c || conn->attempt > 2) {
1073 conn->state = BT_CLOSED;
1074 hci_proto_connect_cfm(conn, status);
1075 hci_conn_del(conn);
1076 } else
1077 conn->state = BT_CONNECT2;
1079 } else {
1080 if (!conn) {
1081 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1082 if (conn) {
1083 conn->out = true;
1084 conn->link_mode |= HCI_LM_MASTER;
1085 } else
1086 BT_ERR("No memory for new connection");
1090 hci_dev_unlock(hdev);
1093 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1095 struct hci_cp_add_sco *cp;
1096 struct hci_conn *acl, *sco;
1097 __u16 handle;
1099 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1101 if (!status)
1102 return;
1104 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1105 if (!cp)
1106 return;
1108 handle = __le16_to_cpu(cp->handle);
1110 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1112 hci_dev_lock(hdev);
1114 acl = hci_conn_hash_lookup_handle(hdev, handle);
1115 if (acl) {
1116 sco = acl->link;
1117 if (sco) {
1118 sco->state = BT_CLOSED;
1120 hci_proto_connect_cfm(sco, status);
1121 hci_conn_del(sco);
1125 hci_dev_unlock(hdev);
1128 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1130 struct hci_cp_auth_requested *cp;
1131 struct hci_conn *conn;
1133 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1135 if (!status)
1136 return;
1138 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1139 if (!cp)
1140 return;
1142 hci_dev_lock(hdev);
1144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1145 if (conn) {
1146 if (conn->state == BT_CONFIG) {
1147 hci_proto_connect_cfm(conn, status);
1148 hci_conn_drop(conn);
1152 hci_dev_unlock(hdev);
1155 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1157 struct hci_cp_set_conn_encrypt *cp;
1158 struct hci_conn *conn;
1160 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1162 if (!status)
1163 return;
1165 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1166 if (!cp)
1167 return;
1169 hci_dev_lock(hdev);
1171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1172 if (conn) {
1173 if (conn->state == BT_CONFIG) {
1174 hci_proto_connect_cfm(conn, status);
1175 hci_conn_drop(conn);
1179 hci_dev_unlock(hdev);
1182 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1183 struct hci_conn *conn)
1185 if (conn->state != BT_CONFIG || !conn->out)
1186 return 0;
1188 if (conn->pending_sec_level == BT_SECURITY_SDP)
1189 return 0;
1191 /* Only request authentication for SSP connections or non-SSP
1192 * devices with sec_level HIGH or if MITM protection is requested */
1193 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1194 conn->pending_sec_level != BT_SECURITY_HIGH)
1195 return 0;
1197 return 1;
1200 static int hci_resolve_name(struct hci_dev *hdev,
1201 struct inquiry_entry *e)
1203 struct hci_cp_remote_name_req cp;
1205 memset(&cp, 0, sizeof(cp));
1207 bacpy(&cp.bdaddr, &e->data.bdaddr);
1208 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1209 cp.pscan_mode = e->data.pscan_mode;
1210 cp.clock_offset = e->data.clock_offset;
1212 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
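/* Pick the next inquiry cache entry whose name still needs resolving and
 * send a Remote Name Request for it; returns true if a request was sent.
 */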
1215 static bool hci_resolve_next_name(struct hci_dev *hdev)
1217 struct discovery_state *discov = &hdev->discovery;
1218 struct inquiry_entry *e;
1220 if (list_empty(&discov->resolve))
1221 return false;
1223 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1224 if (!e)
1225 return false;
1227 if (hci_resolve_name(hdev, e) == 0) {
1228 e->name_state = NAME_PENDING;
1229 return true;
1232 return false;
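/* Called when a remote name lookup finishes (or is abandoned): report the
 * result to mgmt, update the matching inquiry cache entry, and either
 * resolve the next pending name or mark discovery as stopped.
 */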
1235 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1236 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1238 struct discovery_state *discov = &hdev->discovery;
1239 struct inquiry_entry *e;
1241 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1242 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1243 name_len, conn->dev_class);
1245 if (discov->state == DISCOVERY_STOPPED)
1246 return;
1248 if (discov->state == DISCOVERY_STOPPING)
1249 goto discov_complete;
1251 if (discov->state != DISCOVERY_RESOLVING)
1252 return;
1254 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names are
	 * pending resolution, there is no need to continue with the next name
	 * here; that will be done when another Remote Name Request Complete
	 * event is received.
	 */
1259 if (!e)
1260 return;
1262 list_del(&e->list);
1263 if (name) {
1264 e->name_state = NAME_KNOWN;
1265 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1266 e->data.rssi, name, name_len);
1267 } else {
1268 e->name_state = NAME_NOT_KNOWN;
1271 if (hci_resolve_next_name(hdev))
1272 return;
1274 discov_complete:
1275 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1278 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1280 struct hci_cp_remote_name_req *cp;
1281 struct hci_conn *conn;
1283 BT_DBG("%s status 0x%2.2x", hdev->name, status);
	/* If successful, wait for the Remote Name Request Complete event
	 * before checking whether outgoing authentication is needed.
	 */
1287 if (!status)
1288 return;
1290 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1291 if (!cp)
1292 return;
1294 hci_dev_lock(hdev);
1296 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1298 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1299 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1301 if (!conn)
1302 goto unlock;
1304 if (!hci_outgoing_auth_needed(hdev, conn))
1305 goto unlock;
1307 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1308 struct hci_cp_auth_requested auth_cp;
1310 auth_cp.handle = __cpu_to_le16(conn->handle);
1311 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1312 sizeof(auth_cp), &auth_cp);
1315 unlock:
1316 hci_dev_unlock(hdev);
1319 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1321 struct hci_cp_read_remote_features *cp;
1322 struct hci_conn *conn;
1324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1326 if (!status)
1327 return;
1329 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1330 if (!cp)
1331 return;
1333 hci_dev_lock(hdev);
1335 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1336 if (conn) {
1337 if (conn->state == BT_CONFIG) {
1338 hci_proto_connect_cfm(conn, status);
1339 hci_conn_drop(conn);
1343 hci_dev_unlock(hdev);
1346 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1348 struct hci_cp_read_remote_ext_features *cp;
1349 struct hci_conn *conn;
1351 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1353 if (!status)
1354 return;
1356 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1357 if (!cp)
1358 return;
1360 hci_dev_lock(hdev);
1362 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1363 if (conn) {
1364 if (conn->state == BT_CONFIG) {
1365 hci_proto_connect_cfm(conn, status);
1366 hci_conn_drop(conn);
1370 hci_dev_unlock(hdev);
1373 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1375 struct hci_cp_setup_sync_conn *cp;
1376 struct hci_conn *acl, *sco;
1377 __u16 handle;
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1381 if (!status)
1382 return;
1384 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1385 if (!cp)
1386 return;
1388 handle = __le16_to_cpu(cp->handle);
1390 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1392 hci_dev_lock(hdev);
1394 acl = hci_conn_hash_lookup_handle(hdev, handle);
1395 if (acl) {
1396 sco = acl->link;
1397 if (sco) {
1398 sco->state = BT_CLOSED;
1400 hci_proto_connect_cfm(sco, status);
1401 hci_conn_del(sco);
1405 hci_dev_unlock(hdev);
1408 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1410 struct hci_cp_sniff_mode *cp;
1411 struct hci_conn *conn;
1413 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1415 if (!status)
1416 return;
1418 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1419 if (!cp)
1420 return;
1422 hci_dev_lock(hdev);
1424 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1425 if (conn) {
1426 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1428 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1429 hci_sco_setup(conn, status);
1432 hci_dev_unlock(hdev);
1435 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1437 struct hci_cp_exit_sniff_mode *cp;
1438 struct hci_conn *conn;
1440 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1442 if (!status)
1443 return;
1445 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1446 if (!cp)
1447 return;
1449 hci_dev_lock(hdev);
1451 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1452 if (conn) {
1453 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1455 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1456 hci_sco_setup(conn, status);
1459 hci_dev_unlock(hdev);
1462 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1464 struct hci_cp_disconnect *cp;
1465 struct hci_conn *conn;
1467 if (!status)
1468 return;
1470 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1471 if (!cp)
1472 return;
1474 hci_dev_lock(hdev);
1476 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1477 if (conn)
1478 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1479 conn->dst_type, status);
1481 hci_dev_unlock(hdev);
1484 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1486 struct hci_cp_create_phy_link *cp;
1488 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1490 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1491 if (!cp)
1492 return;
1494 hci_dev_lock(hdev);
1496 if (status) {
1497 struct hci_conn *hcon;
1499 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1500 if (hcon)
1501 hci_conn_del(hcon);
1502 } else {
1503 amp_write_remote_assoc(hdev, cp->phy_handle);
1506 hci_dev_unlock(hdev);
1509 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1511 struct hci_cp_accept_phy_link *cp;
1513 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1515 if (status)
1516 return;
1518 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1519 if (!cp)
1520 return;
1522 amp_write_remote_assoc(hdev, cp->phy_handle);
1525 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1527 __u8 status = *((__u8 *) skb->data);
1528 struct discovery_state *discov = &hdev->discovery;
1529 struct inquiry_entry *e;
1531 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1533 hci_conn_check_pending(hdev);
1535 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1536 return;
1538 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1539 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1541 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1542 return;
1544 hci_dev_lock(hdev);
1546 if (discov->state != DISCOVERY_FINDING)
1547 goto unlock;
1549 if (list_empty(&discov->resolve)) {
1550 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1551 goto unlock;
1554 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1555 if (e && hci_resolve_name(hdev, e) == 0) {
1556 e->name_state = NAME_PENDING;
1557 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1558 } else {
1559 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1562 unlock:
1563 hci_dev_unlock(hdev);
1566 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1568 struct inquiry_data data;
1569 struct inquiry_info *info = (void *) (skb->data + 1);
1570 int num_rsp = *((__u8 *) skb->data);
1572 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1574 if (!num_rsp)
1575 return;
1577 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1578 return;
1580 hci_dev_lock(hdev);
1582 for (; num_rsp; num_rsp--, info++) {
1583 bool name_known, ssp;
1585 bacpy(&data.bdaddr, &info->bdaddr);
1586 data.pscan_rep_mode = info->pscan_rep_mode;
1587 data.pscan_period_mode = info->pscan_period_mode;
1588 data.pscan_mode = info->pscan_mode;
1589 memcpy(data.dev_class, info->dev_class, 3);
1590 data.clock_offset = info->clock_offset;
1591 data.rssi = 0x00;
1592 data.ssp_mode = 0x00;
1594 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1595 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1596 info->dev_class, 0, !name_known, ssp, NULL,
1600 hci_dev_unlock(hdev);
1603 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1605 struct hci_ev_conn_complete *ev = (void *) skb->data;
1606 struct hci_conn *conn;
1608 BT_DBG("%s", hdev->name);
1610 hci_dev_lock(hdev);
1612 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1613 if (!conn) {
1614 if (ev->link_type != SCO_LINK)
1615 goto unlock;
1617 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1618 if (!conn)
1619 goto unlock;
1621 conn->type = SCO_LINK;
1624 if (!ev->status) {
1625 conn->handle = __le16_to_cpu(ev->handle);
1627 if (conn->type == ACL_LINK) {
1628 conn->state = BT_CONFIG;
1629 hci_conn_hold(conn);
1631 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1632 !hci_find_link_key(hdev, &ev->bdaddr))
1633 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1634 else
1635 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1636 } else
1637 conn->state = BT_CONNECTED;
1639 hci_conn_add_sysfs(conn);
1641 if (test_bit(HCI_AUTH, &hdev->flags))
1642 conn->link_mode |= HCI_LM_AUTH;
1644 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1645 conn->link_mode |= HCI_LM_ENCRYPT;
1647 /* Get remote features */
1648 if (conn->type == ACL_LINK) {
1649 struct hci_cp_read_remote_features cp;
1650 cp.handle = ev->handle;
1651 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1652 sizeof(cp), &cp);
1655 /* Set packet type for incoming connection */
1656 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1657 struct hci_cp_change_conn_ptype cp;
1658 cp.handle = ev->handle;
1659 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1660 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1661 &cp);
1663 } else {
1664 conn->state = BT_CLOSED;
1665 if (conn->type == ACL_LINK)
1666 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1667 conn->dst_type, ev->status);
1670 if (conn->type == ACL_LINK)
1671 hci_sco_setup(conn, ev->status);
1673 if (ev->status) {
1674 hci_proto_connect_cfm(conn, ev->status);
1675 hci_conn_del(conn);
1676 } else if (ev->link_type != ACL_LINK)
1677 hci_proto_connect_cfm(conn, ev->status);
1679 unlock:
1680 hci_dev_unlock(hdev);
1682 hci_conn_check_pending(hdev);
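/* Incoming connection request: if the link type is acceptable and the peer
 * is not blacklisted, accept it (requesting the master role when we can do
 * a role switch) or defer the decision to the upper layer; otherwise reject
 * the request.
 */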
1685 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1687 struct hci_ev_conn_request *ev = (void *) skb->data;
1688 int mask = hdev->link_mode;
1689 __u8 flags = 0;
1691 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1692 ev->link_type);
1694 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1695 &flags);
1697 if ((mask & HCI_LM_ACCEPT) &&
1698 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1699 /* Connection accepted */
1700 struct inquiry_entry *ie;
1701 struct hci_conn *conn;
1703 hci_dev_lock(hdev);
1705 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1706 if (ie)
1707 memcpy(ie->data.dev_class, ev->dev_class, 3);
1709 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1710 &ev->bdaddr);
1711 if (!conn) {
1712 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1713 if (!conn) {
1714 BT_ERR("No memory for new connection");
1715 hci_dev_unlock(hdev);
1716 return;
1720 memcpy(conn->dev_class, ev->dev_class, 3);
1722 hci_dev_unlock(hdev);
1724 if (ev->link_type == ACL_LINK ||
1725 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1726 struct hci_cp_accept_conn_req cp;
1727 conn->state = BT_CONNECT;
1729 bacpy(&cp.bdaddr, &ev->bdaddr);
1731 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1732 cp.role = 0x00; /* Become master */
1733 else
1734 cp.role = 0x01; /* Remain slave */
1736 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1737 &cp);
1738 } else if (!(flags & HCI_PROTO_DEFER)) {
1739 struct hci_cp_accept_sync_conn_req cp;
1740 conn->state = BT_CONNECT;
1742 bacpy(&cp.bdaddr, &ev->bdaddr);
1743 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1745 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1746 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1747 cp.max_latency = __constant_cpu_to_le16(0xffff);
1748 cp.content_format = cpu_to_le16(hdev->voice_setting);
1749 cp.retrans_effort = 0xff;
1751 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1752 sizeof(cp), &cp);
1753 } else {
1754 conn->state = BT_CONNECT2;
1755 hci_proto_connect_cfm(conn, 0);
1757 } else {
1758 /* Connection rejected */
1759 struct hci_cp_reject_conn_req cp;
1761 bacpy(&cp.bdaddr, &ev->bdaddr);
1762 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1763 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
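/* Translate an HCI disconnect reason into the corresponding mgmt
 * disconnect reason reported to user space.
 */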
1767 static u8 hci_to_mgmt_reason(u8 err)
1769 switch (err) {
1770 case HCI_ERROR_CONNECTION_TIMEOUT:
1771 return MGMT_DEV_DISCONN_TIMEOUT;
1772 case HCI_ERROR_REMOTE_USER_TERM:
1773 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1774 case HCI_ERROR_REMOTE_POWER_OFF:
1775 return MGMT_DEV_DISCONN_REMOTE;
1776 case HCI_ERROR_LOCAL_HOST_TERM:
1777 return MGMT_DEV_DISCONN_LOCAL_HOST;
1778 default:
1779 return MGMT_DEV_DISCONN_UNKNOWN;
1783 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1785 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1786 u8 reason = hci_to_mgmt_reason(ev->reason);
1787 struct hci_conn *conn;
1788 u8 type;
1790 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1792 hci_dev_lock(hdev);
1794 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1795 if (!conn)
1796 goto unlock;
1798 if (ev->status) {
1799 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1800 conn->dst_type, ev->status);
1801 goto unlock;
1804 conn->state = BT_CLOSED;
1806 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1807 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1808 conn->dst_type, reason);
1810 if (conn->type == ACL_LINK && conn->flush_key)
1811 hci_remove_link_key(hdev, &conn->dst);
1813 type = conn->type;
1815 hci_proto_disconn_cfm(conn, ev->reason);
1816 hci_conn_del(conn);
	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
1828 if (type == LE_LINK)
1829 mgmt_reenable_advertising(hdev);
1831 unlock:
1832 hci_dev_unlock(hdev);
1835 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1837 struct hci_ev_auth_complete *ev = (void *) skb->data;
1838 struct hci_conn *conn;
1840 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1842 hci_dev_lock(hdev);
1844 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1845 if (!conn)
1846 goto unlock;
1848 if (!ev->status) {
1849 if (!hci_conn_ssp_enabled(conn) &&
1850 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1851 BT_INFO("re-auth of legacy device is not possible.");
1852 } else {
1853 conn->link_mode |= HCI_LM_AUTH;
1854 conn->sec_level = conn->pending_sec_level;
1856 } else {
1857 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1858 ev->status);
1861 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1862 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1864 if (conn->state == BT_CONFIG) {
1865 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1866 struct hci_cp_set_conn_encrypt cp;
1867 cp.handle = ev->handle;
1868 cp.encrypt = 0x01;
1869 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1870 &cp);
1871 } else {
1872 conn->state = BT_CONNECTED;
1873 hci_proto_connect_cfm(conn, ev->status);
1874 hci_conn_drop(conn);
1876 } else {
1877 hci_auth_cfm(conn, ev->status);
1879 hci_conn_hold(conn);
1880 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1881 hci_conn_drop(conn);
1884 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1885 if (!ev->status) {
1886 struct hci_cp_set_conn_encrypt cp;
1887 cp.handle = ev->handle;
1888 cp.encrypt = 0x01;
1889 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1890 &cp);
1891 } else {
1892 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1893 hci_encrypt_cfm(conn, ev->status, 0x00);
1897 unlock:
1898 hci_dev_unlock(hdev);
1901 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1903 struct hci_ev_remote_name *ev = (void *) skb->data;
1904 struct hci_conn *conn;
1906 BT_DBG("%s", hdev->name);
1908 hci_conn_check_pending(hdev);
1910 hci_dev_lock(hdev);
1912 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1914 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1915 goto check_auth;
1917 if (ev->status == 0)
1918 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1919 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1920 else
1921 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1923 check_auth:
1924 if (!conn)
1925 goto unlock;
1927 if (!hci_outgoing_auth_needed(hdev, conn))
1928 goto unlock;
1930 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1931 struct hci_cp_auth_requested cp;
1932 cp.handle = __cpu_to_le16(conn->handle);
1933 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1936 unlock:
1937 hci_dev_unlock(hdev);
1940 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1942 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1943 struct hci_conn *conn;
1945 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1947 hci_dev_lock(hdev);
1949 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1950 if (conn) {
1951 if (!ev->status) {
1952 if (ev->encrypt) {
1953 /* Encryption implies authentication */
1954 conn->link_mode |= HCI_LM_AUTH;
1955 conn->link_mode |= HCI_LM_ENCRYPT;
1956 conn->sec_level = conn->pending_sec_level;
1957 } else
1958 conn->link_mode &= ~HCI_LM_ENCRYPT;
1961 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1963 if (ev->status && conn->state == BT_CONNECTED) {
1964 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1965 hci_conn_drop(conn);
1966 goto unlock;
1969 if (conn->state == BT_CONFIG) {
1970 if (!ev->status)
1971 conn->state = BT_CONNECTED;
1973 hci_proto_connect_cfm(conn, ev->status);
1974 hci_conn_drop(conn);
1975 } else
1976 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1979 unlock:
1980 hci_dev_unlock(hdev);
1983 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
1984 struct sk_buff *skb)
1986 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1987 struct hci_conn *conn;
1989 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1991 hci_dev_lock(hdev);
1993 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1994 if (conn) {
1995 if (!ev->status)
1996 conn->link_mode |= HCI_LM_SECURE;
1998 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2000 hci_key_change_cfm(conn, ev->status);
2003 hci_dev_unlock(hdev);
2006 static void hci_remote_features_evt(struct hci_dev *hdev,
2007 struct sk_buff *skb)
2009 struct hci_ev_remote_features *ev = (void *) skb->data;
2010 struct hci_conn *conn;
2012 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2014 hci_dev_lock(hdev);
2016 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2017 if (!conn)
2018 goto unlock;
2020 if (!ev->status)
2021 memcpy(conn->features[0], ev->features, 8);
2023 if (conn->state != BT_CONFIG)
2024 goto unlock;
2026 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2027 struct hci_cp_read_remote_ext_features cp;
2028 cp.handle = ev->handle;
2029 cp.page = 0x01;
2030 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2031 sizeof(cp), &cp);
2032 goto unlock;
2035 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2036 struct hci_cp_remote_name_req cp;
2037 memset(&cp, 0, sizeof(cp));
2038 bacpy(&cp.bdaddr, &conn->dst);
2039 cp.pscan_rep_mode = 0x02;
2040 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2041 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2042 mgmt_device_connected(hdev, &conn->dst, conn->type,
2043 conn->dst_type, 0, NULL, 0,
2044 conn->dev_class);
2046 if (!hci_outgoing_auth_needed(hdev, conn)) {
2047 conn->state = BT_CONNECTED;
2048 hci_proto_connect_cfm(conn, ev->status);
2049 hci_conn_drop(conn);
2052 unlock:
2053 hci_dev_unlock(hdev);
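/* Command Complete event: the opcode tells us which command finished, so
 * dispatch to the matching hci_cc_* handler, cancel the command timer and,
 * if the controller advertises free command slots, kick the command queue.
 */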
2056 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2058 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2059 u8 status = skb->data[sizeof(*ev)];
2060 __u16 opcode;
2062 skb_pull(skb, sizeof(*ev));
2064 opcode = __le16_to_cpu(ev->opcode);
2066 switch (opcode) {
2067 case HCI_OP_INQUIRY_CANCEL:
2068 hci_cc_inquiry_cancel(hdev, skb);
2069 break;
2071 case HCI_OP_PERIODIC_INQ:
2072 hci_cc_periodic_inq(hdev, skb);
2073 break;
2075 case HCI_OP_EXIT_PERIODIC_INQ:
2076 hci_cc_exit_periodic_inq(hdev, skb);
2077 break;
2079 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2080 hci_cc_remote_name_req_cancel(hdev, skb);
2081 break;
2083 case HCI_OP_ROLE_DISCOVERY:
2084 hci_cc_role_discovery(hdev, skb);
2085 break;
2087 case HCI_OP_READ_LINK_POLICY:
2088 hci_cc_read_link_policy(hdev, skb);
2089 break;
2091 case HCI_OP_WRITE_LINK_POLICY:
2092 hci_cc_write_link_policy(hdev, skb);
2093 break;
2095 case HCI_OP_READ_DEF_LINK_POLICY:
2096 hci_cc_read_def_link_policy(hdev, skb);
2097 break;
2099 case HCI_OP_WRITE_DEF_LINK_POLICY:
2100 hci_cc_write_def_link_policy(hdev, skb);
2101 break;
2103 case HCI_OP_RESET:
2104 hci_cc_reset(hdev, skb);
2105 break;
2107 case HCI_OP_WRITE_LOCAL_NAME:
2108 hci_cc_write_local_name(hdev, skb);
2109 break;
2111 case HCI_OP_READ_LOCAL_NAME:
2112 hci_cc_read_local_name(hdev, skb);
2113 break;
2115 case HCI_OP_WRITE_AUTH_ENABLE:
2116 hci_cc_write_auth_enable(hdev, skb);
2117 break;
2119 case HCI_OP_WRITE_ENCRYPT_MODE:
2120 hci_cc_write_encrypt_mode(hdev, skb);
2121 break;
2123 case HCI_OP_WRITE_SCAN_ENABLE:
2124 hci_cc_write_scan_enable(hdev, skb);
2125 break;
2127 case HCI_OP_READ_CLASS_OF_DEV:
2128 hci_cc_read_class_of_dev(hdev, skb);
2129 break;
2131 case HCI_OP_WRITE_CLASS_OF_DEV:
2132 hci_cc_write_class_of_dev(hdev, skb);
2133 break;
2135 case HCI_OP_READ_VOICE_SETTING:
2136 hci_cc_read_voice_setting(hdev, skb);
2137 break;
2139 case HCI_OP_WRITE_VOICE_SETTING:
2140 hci_cc_write_voice_setting(hdev, skb);
2141 break;
2143 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2144 hci_cc_read_num_supported_iac(hdev, skb);
2145 break;
2147 case HCI_OP_WRITE_SSP_MODE:
2148 hci_cc_write_ssp_mode(hdev, skb);
2149 break;
2151 case HCI_OP_READ_LOCAL_VERSION:
2152 hci_cc_read_local_version(hdev, skb);
2153 break;
2155 case HCI_OP_READ_LOCAL_COMMANDS:
2156 hci_cc_read_local_commands(hdev, skb);
2157 break;
2159 case HCI_OP_READ_LOCAL_FEATURES:
2160 hci_cc_read_local_features(hdev, skb);
2161 break;
2163 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2164 hci_cc_read_local_ext_features(hdev, skb);
2165 break;
2167 case HCI_OP_READ_BUFFER_SIZE:
2168 hci_cc_read_buffer_size(hdev, skb);
2169 break;
2171 case HCI_OP_READ_BD_ADDR:
2172 hci_cc_read_bd_addr(hdev, skb);
2173 break;
2175 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2176 hci_cc_read_page_scan_activity(hdev, skb);
2177 break;
2179 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2180 hci_cc_write_page_scan_activity(hdev, skb);
2181 break;
2183 case HCI_OP_READ_PAGE_SCAN_TYPE:
2184 hci_cc_read_page_scan_type(hdev, skb);
2185 break;
2187 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2188 hci_cc_write_page_scan_type(hdev, skb);
2189 break;
2191 case HCI_OP_READ_DATA_BLOCK_SIZE:
2192 hci_cc_read_data_block_size(hdev, skb);
2193 break;
2195 case HCI_OP_READ_FLOW_CONTROL_MODE:
2196 hci_cc_read_flow_control_mode(hdev, skb);
2197 break;
2199 case HCI_OP_READ_LOCAL_AMP_INFO:
2200 hci_cc_read_local_amp_info(hdev, skb);
2201 break;
2203 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2204 hci_cc_read_local_amp_assoc(hdev, skb);
2205 break;
2207 case HCI_OP_READ_INQ_RSP_TX_POWER:
2208 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2209 break;
2211 case HCI_OP_PIN_CODE_REPLY:
2212 hci_cc_pin_code_reply(hdev, skb);
2213 break;
2215 case HCI_OP_PIN_CODE_NEG_REPLY:
2216 hci_cc_pin_code_neg_reply(hdev, skb);
2217 break;
2219 case HCI_OP_READ_LOCAL_OOB_DATA:
2220 hci_cc_read_local_oob_data_reply(hdev, skb);
2221 break;
2223 case HCI_OP_LE_READ_BUFFER_SIZE:
2224 hci_cc_le_read_buffer_size(hdev, skb);
2225 break;
2227 case HCI_OP_LE_READ_LOCAL_FEATURES:
2228 hci_cc_le_read_local_features(hdev, skb);
2229 break;
2231 case HCI_OP_LE_READ_ADV_TX_POWER:
2232 hci_cc_le_read_adv_tx_power(hdev, skb);
2233 break;
2235 case HCI_OP_USER_CONFIRM_REPLY:
2236 hci_cc_user_confirm_reply(hdev, skb);
2237 break;
2239 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2240 hci_cc_user_confirm_neg_reply(hdev, skb);
2241 break;
2243 case HCI_OP_USER_PASSKEY_REPLY:
2244 hci_cc_user_passkey_reply(hdev, skb);
2245 break;
2247 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2248 hci_cc_user_passkey_neg_reply(hdev, skb);
2249 break;
2251 case HCI_OP_LE_SET_ADV_ENABLE:
2252 hci_cc_le_set_adv_enable(hdev, skb);
2253 break;
2255 case HCI_OP_LE_SET_SCAN_ENABLE:
2256 hci_cc_le_set_scan_enable(hdev, skb);
2257 break;
2259 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2260 hci_cc_le_read_white_list_size(hdev, skb);
2261 break;
2263 case HCI_OP_LE_READ_SUPPORTED_STATES:
2264 hci_cc_le_read_supported_states(hdev, skb);
2265 break;
2267 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2268 hci_cc_write_le_host_supported(hdev, skb);
2269 break;
2271 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2272 hci_cc_write_remote_amp_assoc(hdev, skb);
2273 break;
2275 default:
2276 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2277 break;
2280 if (opcode != HCI_OP_NOP)
2281 del_timer(&hdev->cmd_timer);
2283 hci_req_cmd_complete(hdev, opcode, status);
2285 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2286 atomic_set(&hdev->cmd_cnt, 1);
2287 if (!skb_queue_empty(&hdev->cmd_q))
2288 queue_work(hdev->workqueue, &hdev->cmd_work);
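/* Handle the HCI Command Status event: dispatch the per-opcode
 * hci_cs_*() handlers, stop the command timer for non-NOP opcodes
 * and kick the command queue when the controller reports free
 * command slots.
 */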
2292 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2294 struct hci_ev_cmd_status *ev = (void *) skb->data;
2295 __u16 opcode;
2297 skb_pull(skb, sizeof(*ev));
2299 opcode = __le16_to_cpu(ev->opcode);
2301 switch (opcode) {
2302 case HCI_OP_INQUIRY:
2303 hci_cs_inquiry(hdev, ev->status);
2304 break;
2306 case HCI_OP_CREATE_CONN:
2307 hci_cs_create_conn(hdev, ev->status);
2308 break;
2310 case HCI_OP_ADD_SCO:
2311 hci_cs_add_sco(hdev, ev->status);
2312 break;
2314 case HCI_OP_AUTH_REQUESTED:
2315 hci_cs_auth_requested(hdev, ev->status);
2316 break;
2318 case HCI_OP_SET_CONN_ENCRYPT:
2319 hci_cs_set_conn_encrypt(hdev, ev->status);
2320 break;
2322 case HCI_OP_REMOTE_NAME_REQ:
2323 hci_cs_remote_name_req(hdev, ev->status);
2324 break;
2326 case HCI_OP_READ_REMOTE_FEATURES:
2327 hci_cs_read_remote_features(hdev, ev->status);
2328 break;
2330 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2331 hci_cs_read_remote_ext_features(hdev, ev->status);
2332 break;
2334 case HCI_OP_SETUP_SYNC_CONN:
2335 hci_cs_setup_sync_conn(hdev, ev->status);
2336 break;
2338 case HCI_OP_SNIFF_MODE:
2339 hci_cs_sniff_mode(hdev, ev->status);
2340 break;
2342 case HCI_OP_EXIT_SNIFF_MODE:
2343 hci_cs_exit_sniff_mode(hdev, ev->status);
2344 break;
2346 case HCI_OP_DISCONNECT:
2347 hci_cs_disconnect(hdev, ev->status);
2348 break;
2350 case HCI_OP_CREATE_PHY_LINK:
2351 hci_cs_create_phylink(hdev, ev->status);
2352 break;
2354 case HCI_OP_ACCEPT_PHY_LINK:
2355 hci_cs_accept_phylink(hdev, ev->status);
2356 break;
2358 default:
2359 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2360 break;
2363 if (opcode != HCI_OP_NOP)
2364 del_timer(&hdev->cmd_timer);
2366 if (ev->status ||
2367 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2368 hci_req_cmd_complete(hdev, opcode, ev->status);
2370 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2371 atomic_set(&hdev->cmd_cnt, 1);
2372 if (!skb_queue_empty(&hdev->cmd_q))
2373 queue_work(hdev->workqueue, &hdev->cmd_work);
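/* Handle the Role Change event: update the connection's master/slave
 * link mode on success, clear the pending role-switch flag and notify
 * upper layers via hci_role_switch_cfm().
 */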
2377 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2379 struct hci_ev_role_change *ev = (void *) skb->data;
2380 struct hci_conn *conn;
2382 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2384 hci_dev_lock(hdev);
2386 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2387 if (conn) {
2388 if (!ev->status) {
2389 if (ev->role)
2390 conn->link_mode &= ~HCI_LM_MASTER;
2391 else
2392 conn->link_mode |= HCI_LM_MASTER;
2395 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2397 hci_role_switch_cfm(conn, ev->status, ev->role);
2400 hci_dev_unlock(hdev);
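/* Handle the Number of Completed Packets event (packet-based flow
 * control): return the completed-packet credits to the ACL, LE and
 * SCO counters and reschedule the TX work.
 */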
2403 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2405 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2406 int i;
2408 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2409 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2410 return;
2413 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2414 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2415 BT_DBG("%s bad parameters", hdev->name);
2416 return;
2419 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2421 for (i = 0; i < ev->num_hndl; i++) {
2422 struct hci_comp_pkts_info *info = &ev->handles[i];
2423 struct hci_conn *conn;
2424 __u16 handle, count;
2426 handle = __le16_to_cpu(info->handle);
2427 count = __le16_to_cpu(info->count);
2429 conn = hci_conn_hash_lookup_handle(hdev, handle);
2430 if (!conn)
2431 continue;
2433 conn->sent -= count;
2435 switch (conn->type) {
2436 case ACL_LINK:
2437 hdev->acl_cnt += count;
2438 if (hdev->acl_cnt > hdev->acl_pkts)
2439 hdev->acl_cnt = hdev->acl_pkts;
2440 break;
2442 case LE_LINK:
2443 if (hdev->le_pkts) {
2444 hdev->le_cnt += count;
2445 if (hdev->le_cnt > hdev->le_pkts)
2446 hdev->le_cnt = hdev->le_pkts;
2447 } else {
2448 hdev->acl_cnt += count;
2449 if (hdev->acl_cnt > hdev->acl_pkts)
2450 hdev->acl_cnt = hdev->acl_pkts;
2452 break;
2454 case SCO_LINK:
2455 hdev->sco_cnt += count;
2456 if (hdev->sco_cnt > hdev->sco_pkts)
2457 hdev->sco_cnt = hdev->sco_pkts;
2458 break;
2460 default:
2461 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2462 break;
2466 queue_work(hdev->workqueue, &hdev->tx_work);
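/* Look up a connection by handle: on BR/EDR controllers the handle
 * identifies a connection directly, on AMP controllers it identifies
 * an hci_chan whose parent connection is returned.
 */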
2469 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2470 __u16 handle)
2472 struct hci_chan *chan;
2474 switch (hdev->dev_type) {
2475 case HCI_BREDR:
2476 return hci_conn_hash_lookup_handle(hdev, handle);
2477 case HCI_AMP:
2478 chan = hci_chan_lookup_handle(hdev, handle);
2479 if (chan)
2480 return chan->conn;
2481 break;
2482 default:
2483 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2484 break;
2487 return NULL;
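/* Handle the Number of Completed Data Blocks event (block-based flow
 * control): return the completed block credits to the shared block
 * counter and reschedule the TX work.
 */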
2490 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2492 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2493 int i;
2495 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2496 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2497 return;
2500 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2501 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2502 BT_DBG("%s bad parameters", hdev->name);
2503 return;
2506 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2507 ev->num_hndl);
2509 for (i = 0; i < ev->num_hndl; i++) {
2510 struct hci_comp_blocks_info *info = &ev->handles[i];
2511 struct hci_conn *conn = NULL;
2512 __u16 handle, block_count;
2514 handle = __le16_to_cpu(info->handle);
2515 block_count = __le16_to_cpu(info->blocks);
2517 conn = __hci_conn_lookup_handle(hdev, handle);
2518 if (!conn)
2519 continue;
2521 conn->sent -= block_count;
2523 switch (conn->type) {
2524 case ACL_LINK:
2525 case AMP_LINK:
2526 hdev->block_cnt += block_count;
2527 if (hdev->block_cnt > hdev->num_blocks)
2528 hdev->block_cnt = hdev->num_blocks;
2529 break;
2531 default:
2532 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2533 break;
2537 queue_work(hdev->workqueue, &hdev->tx_work);
2540 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2542 struct hci_ev_mode_change *ev = (void *) skb->data;
2543 struct hci_conn *conn;
2545 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2547 hci_dev_lock(hdev);
2549 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2550 if (conn) {
2551 conn->mode = ev->mode;
2553 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2554 &conn->flags)) {
2555 if (conn->mode == HCI_CM_ACTIVE)
2556 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2557 else
2558 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2561 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2562 hci_sco_setup(conn, ev->status);
2565 hci_dev_unlock(hdev);
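/* Handle the PIN Code Request event: send a negative reply when the
 * device is not pairable, otherwise forward the request to user space
 * through the management interface.
 */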
2568 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2570 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2571 struct hci_conn *conn;
2573 BT_DBG("%s", hdev->name);
2575 hci_dev_lock(hdev);
2577 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2578 if (!conn)
2579 goto unlock;
2581 if (conn->state == BT_CONNECTED) {
2582 hci_conn_hold(conn);
2583 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2584 hci_conn_drop(conn);
2587 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2588 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2589 sizeof(ev->bdaddr), &ev->bdaddr);
2590 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2591 u8 secure;
2593 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2594 secure = 1;
2595 else
2596 secure = 0;
2598 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2601 unlock:
2602 hci_dev_unlock(hdev);
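/* Handle the Link Key Request event: reply with a stored link key for
 * the peer if one exists and satisfies the required security level,
 * otherwise send a negative reply.
 */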
2605 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2607 struct hci_ev_link_key_req *ev = (void *) skb->data;
2608 struct hci_cp_link_key_reply cp;
2609 struct hci_conn *conn;
2610 struct link_key *key;
2612 BT_DBG("%s", hdev->name);
2614 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2615 return;
2617 hci_dev_lock(hdev);
2619 key = hci_find_link_key(hdev, &ev->bdaddr);
2620 if (!key) {
2621 BT_DBG("%s link key not found for %pMR", hdev->name,
2622 &ev->bdaddr);
2623 goto not_found;
2626 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2627 &ev->bdaddr);
2629 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2630 key->type == HCI_LK_DEBUG_COMBINATION) {
2631 BT_DBG("%s ignoring debug key", hdev->name);
2632 goto not_found;
2635 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2636 if (conn) {
2637 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2638 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2639 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2640 goto not_found;
2643 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2644 conn->pending_sec_level == BT_SECURITY_HIGH) {
2645 BT_DBG("%s ignoring key unauthenticated for high security",
2646 hdev->name);
2647 goto not_found;
2650 conn->key_type = key->type;
2651 conn->pin_length = key->pin_len;
2654 bacpy(&cp.bdaddr, &ev->bdaddr);
2655 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2657 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2659 hci_dev_unlock(hdev);
2661 return;
2663 not_found:
2664 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2665 hci_dev_unlock(hdev);
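/* Handle the Link Key Notification event: record the new key type on
 * the connection and, when the management interface is in use, store
 * the link key.
 */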
2668 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2670 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2671 struct hci_conn *conn;
2672 u8 pin_len = 0;
2674 BT_DBG("%s", hdev->name);
2676 hci_dev_lock(hdev);
2678 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2679 if (conn) {
2680 hci_conn_hold(conn);
2681 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2682 pin_len = conn->pin_length;
2684 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2685 conn->key_type = ev->key_type;
2687 hci_conn_drop(conn);
2690 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2691 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2692 ev->key_type, pin_len);
2694 hci_dev_unlock(hdev);
2697 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2699 struct hci_ev_clock_offset *ev = (void *) skb->data;
2700 struct hci_conn *conn;
2702 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2704 hci_dev_lock(hdev);
2706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2707 if (conn && !ev->status) {
2708 struct inquiry_entry *ie;
2710 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2711 if (ie) {
2712 ie->data.clock_offset = ev->clock_offset;
2713 ie->timestamp = jiffies;
2717 hci_dev_unlock(hdev);
2720 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2722 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2723 struct hci_conn *conn;
2725 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2727 hci_dev_lock(hdev);
2729 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2730 if (conn && !ev->status)
2731 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2733 hci_dev_unlock(hdev);
2736 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2738 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2739 struct inquiry_entry *ie;
2741 BT_DBG("%s", hdev->name);
2743 hci_dev_lock(hdev);
2745 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2746 if (ie) {
2747 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2748 ie->timestamp = jiffies;
2751 hci_dev_unlock(hdev);
2754 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2755 struct sk_buff *skb)
2757 struct inquiry_data data;
2758 int num_rsp = *((__u8 *) skb->data);
2759 bool name_known, ssp;
2761 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2763 if (!num_rsp)
2764 return;
2766 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2767 return;
2769 hci_dev_lock(hdev);
2771 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2772 struct inquiry_info_with_rssi_and_pscan_mode *info;
2773 info = (void *) (skb->data + 1);
2775 for (; num_rsp; num_rsp--, info++) {
2776 bacpy(&data.bdaddr, &info->bdaddr);
2777 data.pscan_rep_mode = info->pscan_rep_mode;
2778 data.pscan_period_mode = info->pscan_period_mode;
2779 data.pscan_mode = info->pscan_mode;
2780 memcpy(data.dev_class, info->dev_class, 3);
2781 data.clock_offset = info->clock_offset;
2782 data.rssi = info->rssi;
2783 data.ssp_mode = 0x00;
2785 name_known = hci_inquiry_cache_update(hdev, &data,
2786 false, &ssp);
2787 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2788 info->dev_class, info->rssi,
2789 !name_known, ssp, NULL, 0);
2791 } else {
2792 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2794 for (; num_rsp; num_rsp--, info++) {
2795 bacpy(&data.bdaddr, &info->bdaddr);
2796 data.pscan_rep_mode = info->pscan_rep_mode;
2797 data.pscan_period_mode = info->pscan_period_mode;
2798 data.pscan_mode = 0x00;
2799 memcpy(data.dev_class, info->dev_class, 3);
2800 data.clock_offset = info->clock_offset;
2801 data.rssi = info->rssi;
2802 data.ssp_mode = 0x00;
2803 name_known = hci_inquiry_cache_update(hdev, &data,
2804 false, &ssp);
2805 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2806 info->dev_class, info->rssi,
2807 !name_known, ssp, NULL, 0);
2811 hci_dev_unlock(hdev);
2814 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2815 struct sk_buff *skb)
2817 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2818 struct hci_conn *conn;
2820 BT_DBG("%s", hdev->name);
2822 hci_dev_lock(hdev);
2824 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2825 if (!conn)
2826 goto unlock;
2828 if (ev->page < HCI_MAX_PAGES)
2829 memcpy(conn->features[ev->page], ev->features, 8);
2831 if (!ev->status && ev->page == 0x01) {
2832 struct inquiry_entry *ie;
2834 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2835 if (ie)
2836 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2838 if (ev->features[0] & LMP_HOST_SSP) {
2839 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2840 } else {
2841 /* It is required by the Bluetooth specification that
2842 * Extended Inquiry Results are only used when Secure
2843 * Simple Pairing is enabled, but some devices violate
2844 * this.
2846 * To make these devices work, the internal SSP
2847 * enabled flag needs to be cleared if the remote host
2848 * features do not indicate SSP support */
2849 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2853 if (conn->state != BT_CONFIG)
2854 goto unlock;
2856 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2857 struct hci_cp_remote_name_req cp;
2858 memset(&cp, 0, sizeof(cp));
2859 bacpy(&cp.bdaddr, &conn->dst);
2860 cp.pscan_rep_mode = 0x02;
2861 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2862 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2863 mgmt_device_connected(hdev, &conn->dst, conn->type,
2864 conn->dst_type, 0, NULL, 0,
2865 conn->dev_class);
2867 if (!hci_outgoing_auth_needed(hdev, conn)) {
2868 conn->state = BT_CONNECTED;
2869 hci_proto_connect_cfm(conn, ev->status);
2870 hci_conn_drop(conn);
2873 unlock:
2874 hci_dev_unlock(hdev);
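/* Handle the Synchronous Connection Complete event: register a newly
 * established SCO/eSCO link, or retry the setup with a fallback packet
 * type on selected failure codes before giving up.
 */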
2877 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2878 struct sk_buff *skb)
2880 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2881 struct hci_conn *conn;
2883 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2885 hci_dev_lock(hdev);
2887 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2888 if (!conn) {
2889 if (ev->link_type == ESCO_LINK)
2890 goto unlock;
2892 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2893 if (!conn)
2894 goto unlock;
2896 conn->type = SCO_LINK;
2899 switch (ev->status) {
2900 case 0x00:
2901 conn->handle = __le16_to_cpu(ev->handle);
2902 conn->state = BT_CONNECTED;
2904 hci_conn_add_sysfs(conn);
2905 break;
2907 case 0x0d: /* Connection Rejected due to Limited Resources */
2908 case 0x11: /* Unsupported Feature or Parameter Value */
2909 case 0x1c: /* SCO interval rejected */
2910 case 0x1a: /* Unsupported Remote Feature */
2911 case 0x1f: /* Unspecified error */
2912 if (conn->out) {
2913 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2914 (hdev->esco_type & EDR_ESCO_MASK);
2915 if (hci_setup_sync(conn, conn->link->handle))
2916 goto unlock;
2918 /* fall through */
2920 default:
2921 conn->state = BT_CLOSED;
2922 break;
2925 hci_proto_connect_cfm(conn, ev->status);
2926 if (ev->status)
2927 hci_conn_del(conn);
2929 unlock:
2930 hci_dev_unlock(hdev);
2933 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
2935 size_t parsed = 0;
2937 while (parsed < eir_len) {
2938 u8 field_len = eir[0];
2940 if (field_len == 0)
2941 return parsed;
2943 parsed += field_len + 1;
2944 eir += field_len + 1;
2947 return eir_len;
2950 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2951 struct sk_buff *skb)
2953 struct inquiry_data data;
2954 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2955 int num_rsp = *((__u8 *) skb->data);
2956 size_t eir_len;
2958 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2960 if (!num_rsp)
2961 return;
2963 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2964 return;
2966 hci_dev_lock(hdev);
2968 for (; num_rsp; num_rsp--, info++) {
2969 bool name_known, ssp;
2971 bacpy(&data.bdaddr, &info->bdaddr);
2972 data.pscan_rep_mode = info->pscan_rep_mode;
2973 data.pscan_period_mode = info->pscan_period_mode;
2974 data.pscan_mode = 0x00;
2975 memcpy(data.dev_class, info->dev_class, 3);
2976 data.clock_offset = info->clock_offset;
2977 data.rssi = info->rssi;
2978 data.ssp_mode = 0x01;
2980 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2981 name_known = eir_has_data_type(info->data,
2982 sizeof(info->data),
2983 EIR_NAME_COMPLETE);
2984 else
2985 name_known = true;
2987 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2988 &ssp);
2989 eir_len = eir_get_length(info->data, sizeof(info->data));
2990 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2991 info->dev_class, info->rssi, !name_known,
2992 ssp, info->data, eir_len);
2995 hci_dev_unlock(hdev);
2998 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
2999 struct sk_buff *skb)
3001 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3002 struct hci_conn *conn;
3004 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3005 __le16_to_cpu(ev->handle));
3007 hci_dev_lock(hdev);
3009 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3010 if (!conn)
3011 goto unlock;
3013 /* For BR/EDR the necessary steps are taken through the
3014 * auth_complete event.
3015 */
3016 if (conn->type != LE_LINK)
3017 goto unlock;
3019 if (!ev->status)
3020 conn->sec_level = conn->pending_sec_level;
3022 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3024 if (ev->status && conn->state == BT_CONNECTED) {
3025 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3026 hci_conn_drop(conn);
3027 goto unlock;
3030 if (conn->state == BT_CONFIG) {
3031 if (!ev->status)
3032 conn->state = BT_CONNECTED;
3034 hci_proto_connect_cfm(conn, ev->status);
3035 hci_conn_drop(conn);
3036 } else {
3037 hci_auth_cfm(conn, ev->status);
3039 hci_conn_hold(conn);
3040 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3041 hci_conn_drop(conn);
3044 unlock:
3045 hci_dev_unlock(hdev);
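/* Work out the authentication requirement to use in an IO Capability
 * reply, following the remote side's dedicated-bonding or no-bonding
 * request and otherwise falling back to the locally stored auth type.
 */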
3048 static u8 hci_get_auth_req(struct hci_conn *conn)
3050 /* If the remote requests dedicated bonding, follow that lead */
3051 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3052 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3053 /* If both remote and local IO capabilities allow MITM
3054 * protection then require it, otherwise don't */
3055 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3056 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3057 return HCI_AT_DEDICATED_BONDING;
3058 else
3059 return HCI_AT_DEDICATED_BONDING_MITM;
3062 /* If the remote requests no-bonding, follow that lead */
3063 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3064 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3065 return conn->remote_auth | (conn->auth_type & 0x01);
3067 return conn->auth_type;
3070 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3072 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3073 struct hci_conn *conn;
3075 BT_DBG("%s", hdev->name);
3077 hci_dev_lock(hdev);
3079 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3080 if (!conn)
3081 goto unlock;
3083 hci_conn_hold(conn);
3085 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3086 goto unlock;
3088 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3089 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3090 struct hci_cp_io_capability_reply cp;
3092 bacpy(&cp.bdaddr, &ev->bdaddr);
3093 /* Change the IO capability from KeyboardDisplay
3094 * to DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
3095 cp.capability = (conn->io_capability == 0x04) ?
3096 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3097 conn->auth_type = hci_get_auth_req(conn);
3098 cp.authentication = conn->auth_type;
3100 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3101 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3102 cp.oob_data = 0x01;
3103 else
3104 cp.oob_data = 0x00;
3106 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3107 sizeof(cp), &cp);
3108 } else {
3109 struct hci_cp_io_capability_neg_reply cp;
3111 bacpy(&cp.bdaddr, &ev->bdaddr);
3112 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3114 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3115 sizeof(cp), &cp);
3118 unlock:
3119 hci_dev_unlock(hdev);
3122 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3124 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3125 struct hci_conn *conn;
3127 BT_DBG("%s", hdev->name);
3129 hci_dev_lock(hdev);
3131 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3132 if (!conn)
3133 goto unlock;
3135 conn->remote_cap = ev->capability;
3136 conn->remote_auth = ev->authentication;
3137 if (ev->oob_data)
3138 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3140 unlock:
3141 hci_dev_unlock(hdev);
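/* Handle the User Confirmation Request event: reject it when required
 * MITM protection cannot be provided, auto-accept (possibly delayed)
 * when neither side needs MITM, and otherwise pass the request on to
 * user space.
 */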
3144 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3145 struct sk_buff *skb)
3147 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3148 int loc_mitm, rem_mitm, confirm_hint = 0;
3149 struct hci_conn *conn;
3151 BT_DBG("%s", hdev->name);
3153 hci_dev_lock(hdev);
3155 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3156 goto unlock;
3158 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3159 if (!conn)
3160 goto unlock;
3162 loc_mitm = (conn->auth_type & 0x01);
3163 rem_mitm = (conn->remote_auth & 0x01);
3165 /* If we require MITM but the remote device can't provide that
3166 * (it has NoInputNoOutput) then reject the confirmation
3167 * request. The only exception is when we're dedicated bonding
3168 * initiators (connect_cfm_cb set) since then we always have the MITM
3169 * bit set. */
3170 if (!conn->connect_cfm_cb && loc_mitm &&
3171 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3172 BT_DBG("Rejecting request: remote device can't provide MITM");
3173 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3174 sizeof(ev->bdaddr), &ev->bdaddr);
3175 goto unlock;
3178 /* If neither side requires MITM protection, auto-accept */
3179 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3180 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3182 /* If we're not the initiators, request authorization to
3183 * proceed from user space (mgmt_user_confirm with
3184 * confirm_hint set to 1). The exception is if neither
3185 * side had MITM, in which case we do auto-accept.
3186 */
3187 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3188 (loc_mitm || rem_mitm)) {
3189 BT_DBG("Confirming auto-accept as acceptor");
3190 confirm_hint = 1;
3191 goto confirm;
3194 BT_DBG("Auto-accept of user confirmation with %ums delay",
3195 hdev->auto_accept_delay);
3197 if (hdev->auto_accept_delay > 0) {
3198 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3199 queue_delayed_work(conn->hdev->workqueue,
3200 &conn->auto_accept_work, delay);
3201 goto unlock;
3204 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3205 sizeof(ev->bdaddr), &ev->bdaddr);
3206 goto unlock;
3209 confirm:
3210 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3211 confirm_hint);
3213 unlock:
3214 hci_dev_unlock(hdev);
3217 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3218 struct sk_buff *skb)
3220 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3222 BT_DBG("%s", hdev->name);
3224 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3225 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3228 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3229 struct sk_buff *skb)
3231 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3232 struct hci_conn *conn;
3234 BT_DBG("%s", hdev->name);
3236 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3237 if (!conn)
3238 return;
3240 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3241 conn->passkey_entered = 0;
3243 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3244 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3245 conn->dst_type, conn->passkey_notify,
3246 conn->passkey_entered);
3249 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3251 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3252 struct hci_conn *conn;
3254 BT_DBG("%s", hdev->name);
3256 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3257 if (!conn)
3258 return;
3260 switch (ev->type) {
3261 case HCI_KEYPRESS_STARTED:
3262 conn->passkey_entered = 0;
3263 return;
3265 case HCI_KEYPRESS_ENTERED:
3266 conn->passkey_entered++;
3267 break;
3269 case HCI_KEYPRESS_ERASED:
3270 conn->passkey_entered--;
3271 break;
3273 case HCI_KEYPRESS_CLEARED:
3274 conn->passkey_entered = 0;
3275 break;
3277 case HCI_KEYPRESS_COMPLETED:
3278 return;
3281 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3282 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3283 conn->dst_type, conn->passkey_notify,
3284 conn->passkey_entered);
3287 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3288 struct sk_buff *skb)
3290 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3291 struct hci_conn *conn;
3293 BT_DBG("%s", hdev->name);
3295 hci_dev_lock(hdev);
3297 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3298 if (!conn)
3299 goto unlock;
3301 /* To avoid duplicate auth_failed events to user space we check
3302 * the HCI_CONN_AUTH_PEND flag which will be set if we
3303 * initiated the authentication. A traditional auth_complete
3304 * event is always produced as initiator and is also mapped to
3305 * the mgmt_auth_failed event */
3306 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3307 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3308 ev->status);
3310 hci_conn_drop(conn);
3312 unlock:
3313 hci_dev_unlock(hdev);
3316 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3317 struct sk_buff *skb)
3319 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3320 struct inquiry_entry *ie;
3321 struct hci_conn *conn;
3323 BT_DBG("%s", hdev->name);
3325 hci_dev_lock(hdev);
3327 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3328 if (conn)
3329 memcpy(conn->features[1], ev->features, 8);
3331 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3332 if (ie)
3333 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3335 hci_dev_unlock(hdev);
3338 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3339 struct sk_buff *skb)
3341 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3342 struct oob_data *data;
3344 BT_DBG("%s", hdev->name);
3346 hci_dev_lock(hdev);
3348 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3349 goto unlock;
3351 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3352 if (data) {
3353 struct hci_cp_remote_oob_data_reply cp;
3355 bacpy(&cp.bdaddr, &ev->bdaddr);
3356 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3357 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3359 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3360 &cp);
3361 } else {
3362 struct hci_cp_remote_oob_data_neg_reply cp;
3364 bacpy(&cp.bdaddr, &ev->bdaddr);
3365 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3366 &cp);
3369 unlock:
3370 hci_dev_unlock(hdev);
3373 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3374 struct sk_buff *skb)
3376 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3377 struct hci_conn *hcon, *bredr_hcon;
3379 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3380 ev->status);
3382 hci_dev_lock(hdev);
3384 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3385 if (!hcon) {
3386 hci_dev_unlock(hdev);
3387 return;
3390 if (ev->status) {
3391 hci_conn_del(hcon);
3392 hci_dev_unlock(hdev);
3393 return;
3396 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3398 hcon->state = BT_CONNECTED;
3399 bacpy(&hcon->dst, &bredr_hcon->dst);
3401 hci_conn_hold(hcon);
3402 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3403 hci_conn_drop(hcon);
3405 hci_conn_add_sysfs(hcon);
3407 amp_physical_cfm(bredr_hcon, hcon);
3409 hci_dev_unlock(hdev);
3412 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3414 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3415 struct hci_conn *hcon;
3416 struct hci_chan *hchan;
3417 struct amp_mgr *mgr;
3419 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3420 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3421 ev->status);
3423 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3424 if (!hcon)
3425 return;
3427 /* Create AMP hchan */
3428 hchan = hci_chan_create(hcon);
3429 if (!hchan)
3430 return;
3432 hchan->handle = le16_to_cpu(ev->handle);
3434 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3436 mgr = hcon->amp_mgr;
3437 if (mgr && mgr->bredr_chan) {
3438 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3440 l2cap_chan_lock(bredr_chan);
3442 bredr_chan->conn->mtu = hdev->block_mtu;
3443 l2cap_logical_cfm(bredr_chan, hchan, 0);
3444 hci_conn_hold(hcon);
3446 l2cap_chan_unlock(bredr_chan);
3450 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3451 struct sk_buff *skb)
3453 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3454 struct hci_chan *hchan;
3456 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3457 le16_to_cpu(ev->handle), ev->status);
3459 if (ev->status)
3460 return;
3462 hci_dev_lock(hdev);
3464 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3465 if (!hchan)
3466 goto unlock;
3468 amp_destroy_logical_link(hchan, ev->reason);
3470 unlock:
3471 hci_dev_unlock(hdev);
3474 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3475 struct sk_buff *skb)
3477 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3478 struct hci_conn *hcon;
3480 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3482 if (ev->status)
3483 return;
3485 hci_dev_lock(hdev);
3487 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3488 if (hcon) {
3489 hcon->state = BT_CLOSED;
3490 hci_conn_del(hcon);
3493 hci_dev_unlock(hdev);
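/* Handle the LE Connection Complete event: find or create the LE
 * connection, record its addresses, role and handle, and notify the
 * management interface and upper layers of the result.
 */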
3496 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3498 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3499 struct hci_conn *conn;
3501 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3503 hci_dev_lock(hdev);
3505 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3506 if (!conn) {
3507 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3508 if (!conn) {
3509 BT_ERR("No memory for new connection");
3510 goto unlock;
3513 conn->dst_type = ev->bdaddr_type;
3515 /* The advertising parameters for own address type
3516 * define which source address and source address
3517 * type this connection has.
3518 */
3519 if (bacmp(&conn->src, BDADDR_ANY)) {
3520 conn->src_type = ADDR_LE_DEV_PUBLIC;
3521 } else {
3522 bacpy(&conn->src, &hdev->static_addr);
3523 conn->src_type = ADDR_LE_DEV_RANDOM;
3526 if (ev->role == LE_CONN_ROLE_MASTER) {
3527 conn->out = true;
3528 conn->link_mode |= HCI_LM_MASTER;
3532 if (ev->status) {
3533 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3534 conn->dst_type, ev->status);
3535 hci_proto_connect_cfm(conn, ev->status);
3536 conn->state = BT_CLOSED;
3537 hci_conn_del(conn);
3538 goto unlock;
3541 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3542 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3543 conn->dst_type, 0, NULL, 0, NULL);
3545 conn->sec_level = BT_SECURITY_LOW;
3546 conn->handle = __le16_to_cpu(ev->handle);
3547 conn->state = BT_CONNECTED;
3549 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3550 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3552 hci_conn_add_sysfs(conn);
3554 hci_proto_connect_cfm(conn, ev->status);
3556 unlock:
3557 hci_dev_unlock(hdev);
3560 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3562 u8 num_reports = skb->data[0];
3563 void *ptr = &skb->data[1];
3564 s8 rssi;
3566 while (num_reports--) {
3567 struct hci_ev_le_advertising_info *ev = ptr;
3569 rssi = ev->data[ev->length];
3570 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3571 NULL, rssi, 0, 1, ev->data, ev->length);
3573 ptr += sizeof(*ev) + ev->length + 1;
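/* Handle the LE Long Term Key Request event: reply with a stored LTK
 * matching the EDiv/Rand values, deleting slave STKs after use, or
 * send a negative reply when no key is known.
 */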
3577 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3579 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3580 struct hci_cp_le_ltk_reply cp;
3581 struct hci_cp_le_ltk_neg_reply neg;
3582 struct hci_conn *conn;
3583 struct smp_ltk *ltk;
3585 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3587 hci_dev_lock(hdev);
3589 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3590 if (conn == NULL)
3591 goto not_found;
3593 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3594 if (ltk == NULL)
3595 goto not_found;
3597 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3598 cp.handle = cpu_to_le16(conn->handle);
3600 if (ltk->authenticated)
3601 conn->pending_sec_level = BT_SECURITY_HIGH;
3602 else
3603 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3605 conn->enc_key_size = ltk->enc_size;
3607 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3609 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
3610 * temporary key used to encrypt a connection following
3611 * pairing. It is used during the Encrypted Session Setup to
3612 * distribute the keys. Later, security can be re-established
3613 * using a distributed LTK.
3614 */
3615 if (ltk->type == HCI_SMP_STK_SLAVE) {
3616 list_del(&ltk->list);
3617 kfree(ltk);
3620 hci_dev_unlock(hdev);
3622 return;
3624 not_found:
3625 neg.handle = ev->handle;
3626 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3627 hci_dev_unlock(hdev);
3630 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3632 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3634 skb_pull(skb, sizeof(*le_ev));
3636 switch (le_ev->subevent) {
3637 case HCI_EV_LE_CONN_COMPLETE:
3638 hci_le_conn_complete_evt(hdev, skb);
3639 break;
3641 case HCI_EV_LE_ADVERTISING_REPORT:
3642 hci_le_adv_report_evt(hdev, skb);
3643 break;
3645 case HCI_EV_LE_LTK_REQ:
3646 hci_le_ltk_request_evt(hdev, skb);
3647 break;
3649 default:
3650 break;
3654 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3656 struct hci_ev_channel_selected *ev = (void *) skb->data;
3657 struct hci_conn *hcon;
3659 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3661 skb_pull(skb, sizeof(*ev));
3663 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3664 if (!hcon)
3665 return;
3667 amp_read_loc_assoc_final_data(hdev, hcon);
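/* Main HCI event dispatcher: keep a copy of the event for a pending
 * synchronous request, complete the sent command if it was waiting
 * for this event, and route the packet to the matching handler.
 */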
3670 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3672 struct hci_event_hdr *hdr = (void *) skb->data;
3673 __u8 event = hdr->evt;
3675 hci_dev_lock(hdev);
3677 /* Received events are (currently) only needed when a request is
3678 * ongoing, so avoid unnecessary memory allocation.
3679 */
3680 if (hdev->req_status == HCI_REQ_PEND) {
3681 kfree_skb(hdev->recv_evt);
3682 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3685 hci_dev_unlock(hdev);
3687 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3689 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3690 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
3691 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
3693 hci_req_cmd_complete(hdev, opcode, 0);
3696 switch (event) {
3697 case HCI_EV_INQUIRY_COMPLETE:
3698 hci_inquiry_complete_evt(hdev, skb);
3699 break;
3701 case HCI_EV_INQUIRY_RESULT:
3702 hci_inquiry_result_evt(hdev, skb);
3703 break;
3705 case HCI_EV_CONN_COMPLETE:
3706 hci_conn_complete_evt(hdev, skb);
3707 break;
3709 case HCI_EV_CONN_REQUEST:
3710 hci_conn_request_evt(hdev, skb);
3711 break;
3713 case HCI_EV_DISCONN_COMPLETE:
3714 hci_disconn_complete_evt(hdev, skb);
3715 break;
3717 case HCI_EV_AUTH_COMPLETE:
3718 hci_auth_complete_evt(hdev, skb);
3719 break;
3721 case HCI_EV_REMOTE_NAME:
3722 hci_remote_name_evt(hdev, skb);
3723 break;
3725 case HCI_EV_ENCRYPT_CHANGE:
3726 hci_encrypt_change_evt(hdev, skb);
3727 break;
3729 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3730 hci_change_link_key_complete_evt(hdev, skb);
3731 break;
3733 case HCI_EV_REMOTE_FEATURES:
3734 hci_remote_features_evt(hdev, skb);
3735 break;
3737 case HCI_EV_CMD_COMPLETE:
3738 hci_cmd_complete_evt(hdev, skb);
3739 break;
3741 case HCI_EV_CMD_STATUS:
3742 hci_cmd_status_evt(hdev, skb);
3743 break;
3745 case HCI_EV_ROLE_CHANGE:
3746 hci_role_change_evt(hdev, skb);
3747 break;
3749 case HCI_EV_NUM_COMP_PKTS:
3750 hci_num_comp_pkts_evt(hdev, skb);
3751 break;
3753 case HCI_EV_MODE_CHANGE:
3754 hci_mode_change_evt(hdev, skb);
3755 break;
3757 case HCI_EV_PIN_CODE_REQ:
3758 hci_pin_code_request_evt(hdev, skb);
3759 break;
3761 case HCI_EV_LINK_KEY_REQ:
3762 hci_link_key_request_evt(hdev, skb);
3763 break;
3765 case HCI_EV_LINK_KEY_NOTIFY:
3766 hci_link_key_notify_evt(hdev, skb);
3767 break;
3769 case HCI_EV_CLOCK_OFFSET:
3770 hci_clock_offset_evt(hdev, skb);
3771 break;
3773 case HCI_EV_PKT_TYPE_CHANGE:
3774 hci_pkt_type_change_evt(hdev, skb);
3775 break;
3777 case HCI_EV_PSCAN_REP_MODE:
3778 hci_pscan_rep_mode_evt(hdev, skb);
3779 break;
3781 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3782 hci_inquiry_result_with_rssi_evt(hdev, skb);
3783 break;
3785 case HCI_EV_REMOTE_EXT_FEATURES:
3786 hci_remote_ext_features_evt(hdev, skb);
3787 break;
3789 case HCI_EV_SYNC_CONN_COMPLETE:
3790 hci_sync_conn_complete_evt(hdev, skb);
3791 break;
3793 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3794 hci_extended_inquiry_result_evt(hdev, skb);
3795 break;
3797 case HCI_EV_KEY_REFRESH_COMPLETE:
3798 hci_key_refresh_complete_evt(hdev, skb);
3799 break;
3801 case HCI_EV_IO_CAPA_REQUEST:
3802 hci_io_capa_request_evt(hdev, skb);
3803 break;
3805 case HCI_EV_IO_CAPA_REPLY:
3806 hci_io_capa_reply_evt(hdev, skb);
3807 break;
3809 case HCI_EV_USER_CONFIRM_REQUEST:
3810 hci_user_confirm_request_evt(hdev, skb);
3811 break;
3813 case HCI_EV_USER_PASSKEY_REQUEST:
3814 hci_user_passkey_request_evt(hdev, skb);
3815 break;
3817 case HCI_EV_USER_PASSKEY_NOTIFY:
3818 hci_user_passkey_notify_evt(hdev, skb);
3819 break;
3821 case HCI_EV_KEYPRESS_NOTIFY:
3822 hci_keypress_notify_evt(hdev, skb);
3823 break;
3825 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3826 hci_simple_pair_complete_evt(hdev, skb);
3827 break;
3829 case HCI_EV_REMOTE_HOST_FEATURES:
3830 hci_remote_host_features_evt(hdev, skb);
3831 break;
3833 case HCI_EV_LE_META:
3834 hci_le_meta_evt(hdev, skb);
3835 break;
3837 case HCI_EV_CHANNEL_SELECTED:
3838 hci_chan_selected_evt(hdev, skb);
3839 break;
3841 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3842 hci_remote_oob_data_request_evt(hdev, skb);
3843 break;
3845 case HCI_EV_PHY_LINK_COMPLETE:
3846 hci_phy_link_complete_evt(hdev, skb);
3847 break;
3849 case HCI_EV_LOGICAL_LINK_COMPLETE:
3850 hci_loglink_complete_evt(hdev, skb);
3851 break;
3853 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3854 hci_disconn_loglink_complete_evt(hdev, skb);
3855 break;
3857 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3858 hci_disconn_phylink_complete_evt(hdev, skb);
3859 break;
3861 case HCI_EV_NUM_COMP_BLOCKS:
3862 hci_num_comp_blocks_evt(hdev, skb);
3863 break;
3865 default:
3866 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3867 break;
3870 kfree_skb(skb);
3871 hdev->stat.evt_rx++;