Linux 3.12.28
[linux/fpc-iii.git] net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
35 /* Handle HCI Event packets */
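/*
 * Naming convention: hci_cc_* helpers handle Command Complete events,
 * hci_cs_* helpers handle Command Status events, and hci_*_evt
 * functions handle the remaining HCI events. The first two groups are
 * dispatched by opcode from hci_cmd_complete_evt() and
 * hci_cmd_status_evt() further down in this file.
 */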
37 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 __u8 status = *((__u8 *) skb->data);
41 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43 if (status)
44 return;
46 clear_bit(HCI_INQUIRY, &hdev->flags);
47 smp_mb__after_clear_bit(); /* barrier required by wake_up_bit() */
48 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 hci_dev_lock(hdev);
51 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
52 hci_dev_unlock(hdev);
54 hci_conn_check_pending(hdev);
57 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
59 __u8 status = *((__u8 *) skb->data);
61 BT_DBG("%s status 0x%2.2x", hdev->name, status);
63 if (status)
64 return;
66 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 if (status)
76 return;
78 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
80 hci_conn_check_pending(hdev);
83 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
84 struct sk_buff *skb)
86 BT_DBG("%s", hdev->name);
89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91 struct hci_rp_role_discovery *rp = (void *) skb->data;
92 struct hci_conn *conn;
94 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
96 if (rp->status)
97 return;
99 hci_dev_lock(hdev);
101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102 if (conn) {
103 if (rp->role)
104 conn->link_mode &= ~HCI_LM_MASTER;
105 else
106 conn->link_mode |= HCI_LM_MASTER;
109 hci_dev_unlock(hdev);
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 struct hci_conn *conn;
117 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
119 if (rp->status)
120 return;
122 hci_dev_lock(hdev);
124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 if (conn)
126 conn->link_policy = __le16_to_cpu(rp->policy);
128 hci_dev_unlock(hdev);
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
133 struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 struct hci_conn *conn;
135 void *sent;
137 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
139 if (rp->status)
140 return;
142 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
143 if (!sent)
144 return;
146 hci_dev_lock(hdev);
148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 if (conn)
150 conn->link_policy = get_unaligned_le16(sent + 2);
152 hci_dev_unlock(hdev);
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
156 struct sk_buff *skb)
158 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 if (rp->status)
163 return;
165 hdev->link_policy = __le16_to_cpu(rp->policy);
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
169 struct sk_buff *skb)
171 __u8 status = *((__u8 *) skb->data);
172 void *sent;
174 BT_DBG("%s status 0x%2.2x", hdev->name, status);
176 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
177 if (!sent)
178 return;
180 if (!status)
181 hdev->link_policy = get_unaligned_le16(sent);
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 __u8 status = *((__u8 *) skb->data);
188 BT_DBG("%s status 0x%2.2x", hdev->name, status);
190 clear_bit(HCI_RESET, &hdev->flags);
192 /* Reset all non-persistent flags */
193 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
195 hdev->discovery.state = DISCOVERY_STOPPED;
196 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
197 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
199 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
200 hdev->adv_data_len = 0;
203 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
205 __u8 status = *((__u8 *) skb->data);
206 void *sent;
208 BT_DBG("%s status 0x%2.2x", hdev->name, status);
210 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
211 if (!sent)
212 return;
214 hci_dev_lock(hdev);
216 if (test_bit(HCI_MGMT, &hdev->dev_flags))
217 mgmt_set_local_name_complete(hdev, sent, status);
218 else if (!status)
219 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
221 hci_dev_unlock(hdev);
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
226 struct hci_rp_read_local_name *rp = (void *) skb->data;
228 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
230 if (rp->status)
231 return;
233 if (test_bit(HCI_SETUP, &hdev->dev_flags))
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
239 __u8 status = *((__u8 *) skb->data);
240 void *sent;
242 BT_DBG("%s status 0x%2.2x", hdev->name, status);
244 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245 if (!sent)
246 return;
248 if (!status) {
249 __u8 param = *((__u8 *) sent);
251 if (param == AUTH_ENABLED)
252 set_bit(HCI_AUTH, &hdev->flags);
253 else
254 clear_bit(HCI_AUTH, &hdev->flags);
257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 mgmt_auth_enable_complete(hdev, status);
261 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
264 void *sent;
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
269 if (!sent)
270 return;
272 if (!status) {
273 __u8 param = *((__u8 *) sent);
275 if (param)
276 set_bit(HCI_ENCRYPT, &hdev->flags);
277 else
278 clear_bit(HCI_ENCRYPT, &hdev->flags);
282 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
284 __u8 param, status = *((__u8 *) skb->data);
285 int old_pscan, old_iscan;
286 void *sent;
288 BT_DBG("%s status 0x%2.2x", hdev->name, status);
290 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
291 if (!sent)
292 return;
294 param = *((__u8 *) sent);
296 hci_dev_lock(hdev);
298 if (status) {
299 mgmt_write_scan_failed(hdev, param, status);
300 hdev->discov_timeout = 0;
301 goto done;
304 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
305 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
307 if (param & SCAN_INQUIRY) {
308 set_bit(HCI_ISCAN, &hdev->flags);
309 if (!old_iscan)
310 mgmt_discoverable(hdev, 1);
311 if (hdev->discov_timeout > 0) {
312 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
313 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
314 to);
316 } else if (old_iscan)
317 mgmt_discoverable(hdev, 0);
319 if (param & SCAN_PAGE) {
320 set_bit(HCI_PSCAN, &hdev->flags);
321 if (!old_pscan)
322 mgmt_connectable(hdev, 1);
323 } else if (old_pscan)
324 mgmt_connectable(hdev, 0);
326 done:
327 hci_dev_unlock(hdev);
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
332 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
336 if (rp->status)
337 return;
339 memcpy(hdev->dev_class, rp->dev_class, 3);
341 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
347 __u8 status = *((__u8 *) skb->data);
348 void *sent;
350 BT_DBG("%s status 0x%2.2x", hdev->name, status);
352 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
353 if (!sent)
354 return;
356 hci_dev_lock(hdev);
358 if (status == 0)
359 memcpy(hdev->dev_class, sent, 3);
361 if (test_bit(HCI_MGMT, &hdev->dev_flags))
362 mgmt_set_class_of_dev_complete(hdev, sent, status);
364 hci_dev_unlock(hdev);
367 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
369 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
370 __u16 setting;
372 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
374 if (rp->status)
375 return;
377 setting = __le16_to_cpu(rp->voice_setting);
379 if (hdev->voice_setting == setting)
380 return;
382 hdev->voice_setting = setting;
384 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
386 if (hdev->notify)
387 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
391 struct sk_buff *skb)
393 __u8 status = *((__u8 *) skb->data);
394 __u16 setting;
395 void *sent;
397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
399 if (status)
400 return;
402 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
403 if (!sent)
404 return;
406 setting = get_unaligned_le16(sent);
408 if (hdev->voice_setting == setting)
409 return;
411 hdev->voice_setting = setting;
413 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
415 if (hdev->notify)
416 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
421 __u8 status = *((__u8 *) skb->data);
422 struct hci_cp_write_ssp_mode *sent;
424 BT_DBG("%s status 0x%2.2x", hdev->name, status);
426 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
427 if (!sent)
428 return;
430 if (!status) {
431 if (sent->mode)
432 hdev->features[1][0] |= LMP_HOST_SSP;
433 else
434 hdev->features[1][0] &= ~LMP_HOST_SSP;
437 if (test_bit(HCI_MGMT, &hdev->dev_flags))
438 mgmt_ssp_enable_complete(hdev, sent->mode, status);
439 else if (!status) {
440 if (sent->mode)
441 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
442 else
443 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
447 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
449 struct hci_rp_read_local_version *rp = (void *) skb->data;
451 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
453 if (rp->status)
454 return;
456 hdev->hci_ver = rp->hci_ver;
457 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
458 hdev->lmp_ver = rp->lmp_ver;
459 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
460 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
462 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
463 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
466 static void hci_cc_read_local_commands(struct hci_dev *hdev,
467 struct sk_buff *skb)
469 struct hci_rp_read_local_commands *rp = (void *) skb->data;
471 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
473 if (!rp->status)
474 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
477 static void hci_cc_read_local_features(struct hci_dev *hdev,
478 struct sk_buff *skb)
480 struct hci_rp_read_local_features *rp = (void *) skb->data;
482 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
484 if (rp->status)
485 return;
487 memcpy(hdev->features, rp->features, 8);
489 /* Adjust default settings according to features
490 * supported by the device. */
492 if (hdev->features[0][0] & LMP_3SLOT)
493 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
495 if (hdev->features[0][0] & LMP_5SLOT)
496 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
498 if (hdev->features[0][1] & LMP_HV2) {
499 hdev->pkt_type |= (HCI_HV2);
500 hdev->esco_type |= (ESCO_HV2);
503 if (hdev->features[0][1] & LMP_HV3) {
504 hdev->pkt_type |= (HCI_HV3);
505 hdev->esco_type |= (ESCO_HV3);
508 if (lmp_esco_capable(hdev))
509 hdev->esco_type |= (ESCO_EV3);
511 if (hdev->features[0][4] & LMP_EV4)
512 hdev->esco_type |= (ESCO_EV4);
514 if (hdev->features[0][4] & LMP_EV5)
515 hdev->esco_type |= (ESCO_EV5);
517 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
518 hdev->esco_type |= (ESCO_2EV3);
520 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
521 hdev->esco_type |= (ESCO_3EV3);
523 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
524 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
526 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
527 hdev->features[0][0], hdev->features[0][1],
528 hdev->features[0][2], hdev->features[0][3],
529 hdev->features[0][4], hdev->features[0][5],
530 hdev->features[0][6], hdev->features[0][7]);
533 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
534 struct sk_buff *skb)
536 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
538 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
540 if (rp->status)
541 return;
543 hdev->max_page = rp->max_page;
545 if (rp->page < HCI_MAX_PAGES)
546 memcpy(hdev->features[rp->page], rp->features, 8);
549 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
550 struct sk_buff *skb)
552 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
554 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
556 if (!rp->status)
557 hdev->flow_ctl_mode = rp->mode;
560 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
562 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
564 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
566 if (rp->status)
567 return;
569 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
570 hdev->sco_mtu = rp->sco_mtu;
571 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
572 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
574 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
575 hdev->sco_mtu = 64;
576 hdev->sco_pkts = 8;
579 hdev->acl_cnt = hdev->acl_pkts;
580 hdev->sco_cnt = hdev->sco_pkts;
582 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
583 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
586 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
588 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 if (!rp->status)
593 bacpy(&hdev->bdaddr, &rp->bdaddr);
596 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
597 struct sk_buff *skb)
599 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
604 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
605 hdev->page_scan_window = __le16_to_cpu(rp->window);
609 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
610 struct sk_buff *skb)
612 u8 status = *((u8 *) skb->data);
613 struct hci_cp_write_page_scan_activity *sent;
615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
617 if (status)
618 return;
620 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
621 if (!sent)
622 return;
624 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
625 hdev->page_scan_window = __le16_to_cpu(sent->window);
628 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
629 struct sk_buff *skb)
631 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
633 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
636 hdev->page_scan_type = rp->type;
639 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
640 struct sk_buff *skb)
642 u8 status = *((u8 *) skb->data);
643 u8 *type;
645 BT_DBG("%s status 0x%2.2x", hdev->name, status);
647 if (status)
648 return;
650 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
651 if (type)
652 hdev->page_scan_type = *type;
655 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
656 struct sk_buff *skb)
658 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
662 if (rp->status)
663 return;
665 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
666 hdev->block_len = __le16_to_cpu(rp->block_len);
667 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
669 hdev->block_cnt = hdev->num_blocks;
671 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
672 hdev->block_cnt, hdev->block_len);
675 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
676 struct sk_buff *skb)
678 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
682 if (rp->status)
683 goto a2mp_rsp;
685 hdev->amp_status = rp->amp_status;
686 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
687 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
688 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
689 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
690 hdev->amp_type = rp->amp_type;
691 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
692 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
693 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
694 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
696 a2mp_rsp:
697 a2mp_send_getinfo_rsp(hdev);
700 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
701 struct sk_buff *skb)
703 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
704 struct amp_assoc *assoc = &hdev->loc_assoc;
705 size_t rem_len, frag_len;
707 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709 if (rp->status)
710 goto a2mp_rsp;
712 frag_len = skb->len - sizeof(*rp);
713 rem_len = __le16_to_cpu(rp->rem_len);
715 if (rem_len > frag_len) {
716 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
718 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
719 assoc->offset += frag_len;
721 /* Read other fragments */
722 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
724 return;
727 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
728 assoc->len = assoc->offset + rem_len;
729 assoc->offset = 0;
731 a2mp_rsp:
732 /* Send A2MP Rsp when all fragments are received */
733 a2mp_send_getampassoc_rsp(hdev, rp->status);
734 a2mp_send_create_phy_link_req(hdev, rp->status);
737 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
738 struct sk_buff *skb)
740 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
742 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
744 if (!rp->status)
745 hdev->inq_tx_power = rp->tx_power;
748 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
750 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
751 struct hci_cp_pin_code_reply *cp;
752 struct hci_conn *conn;
754 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
756 hci_dev_lock(hdev);
758 if (test_bit(HCI_MGMT, &hdev->dev_flags))
759 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
761 if (rp->status)
762 goto unlock;
764 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
765 if (!cp)
766 goto unlock;
768 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
769 if (conn)
770 conn->pin_length = cp->pin_len;
772 unlock:
773 hci_dev_unlock(hdev);
776 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
778 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
780 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
782 hci_dev_lock(hdev);
784 if (test_bit(HCI_MGMT, &hdev->dev_flags))
785 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
786 rp->status);
788 hci_dev_unlock(hdev);
791 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
792 struct sk_buff *skb)
794 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 if (rp->status)
799 return;
801 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
802 hdev->le_pkts = rp->le_max_pkt;
804 hdev->le_cnt = hdev->le_pkts;
806 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
809 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
810 struct sk_buff *skb)
812 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
816 if (!rp->status)
817 memcpy(hdev->le_features, rp->features, 8);
820 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
821 struct sk_buff *skb)
823 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
825 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
827 if (!rp->status)
828 hdev->adv_tx_power = rp->tx_power;
831 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
833 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
835 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
837 hci_dev_lock(hdev);
839 if (test_bit(HCI_MGMT, &hdev->dev_flags))
840 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
841 rp->status);
843 hci_dev_unlock(hdev);
846 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
847 struct sk_buff *skb)
849 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
853 hci_dev_lock(hdev);
855 if (test_bit(HCI_MGMT, &hdev->dev_flags))
856 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
857 ACL_LINK, 0, rp->status);
859 hci_dev_unlock(hdev);
862 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
864 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
866 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868 hci_dev_lock(hdev);
870 if (test_bit(HCI_MGMT, &hdev->dev_flags))
871 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
872 0, rp->status);
874 hci_dev_unlock(hdev);
877 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
878 struct sk_buff *skb)
880 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
882 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884 hci_dev_lock(hdev);
886 if (test_bit(HCI_MGMT, &hdev->dev_flags))
887 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
888 ACL_LINK, 0, rp->status);
890 hci_dev_unlock(hdev);
893 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
894 struct sk_buff *skb)
896 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
900 hci_dev_lock(hdev);
901 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
902 rp->randomizer, rp->status);
903 hci_dev_unlock(hdev);
906 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
908 __u8 *sent, status = *((__u8 *) skb->data);
910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
912 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
913 if (!sent)
914 return;
916 hci_dev_lock(hdev);
918 if (!status) {
919 if (*sent)
920 set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
921 else
922 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
925 if (!test_bit(HCI_INIT, &hdev->flags)) {
926 struct hci_request req;
928 hci_req_init(&req, hdev);
929 hci_update_ad(&req);
930 hci_req_run(&req, NULL);
933 hci_dev_unlock(hdev);
936 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
937 struct sk_buff *skb)
939 struct hci_cp_le_set_scan_enable *cp;
940 __u8 status = *((__u8 *) skb->data);
942 BT_DBG("%s status 0x%2.2x", hdev->name, status);
944 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
945 if (!cp)
946 return;
948 if (status)
949 return;
951 switch (cp->enable) {
952 case LE_SCAN_ENABLE:
953 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
954 break;
956 case LE_SCAN_DISABLE:
957 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
958 break;
960 default:
961 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
962 break;
966 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
967 struct sk_buff *skb)
969 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
971 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
973 if (!rp->status)
974 hdev->le_white_list_size = rp->size;
977 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
978 struct sk_buff *skb)
980 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984 if (!rp->status)
985 memcpy(hdev->le_states, rp->le_states, 8);
988 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
989 struct sk_buff *skb)
991 struct hci_cp_write_le_host_supported *sent;
992 __u8 status = *((__u8 *) skb->data);
994 BT_DBG("%s status 0x%2.2x", hdev->name, status);
996 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
997 if (!sent)
998 return;
1000 if (!status) {
1001 if (sent->le)
1002 hdev->features[1][0] |= LMP_HOST_LE;
1003 else
1004 hdev->features[1][0] &= ~LMP_HOST_LE;
1006 if (sent->simul)
1007 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1008 else
1009 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1012 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1013 !test_bit(HCI_INIT, &hdev->flags))
1014 mgmt_le_enable_complete(hdev, sent->le, status);
1017 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1018 struct sk_buff *skb)
1020 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1022 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1023 hdev->name, rp->status, rp->phy_handle);
1025 if (rp->status)
1026 return;
1028 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
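/*
 * The hci_cs_* helpers below handle Command Status events; most of
 * them only need to act when the controller reports a failure status
 * for the command, since success is handled by a later event.
 */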
1031 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1033 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1035 if (status) {
1036 hci_conn_check_pending(hdev);
1037 return;
1040 set_bit(HCI_INQUIRY, &hdev->flags);
1043 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1045 struct hci_cp_create_conn *cp;
1046 struct hci_conn *conn;
1048 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1050 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1051 if (!cp)
1052 return;
1054 hci_dev_lock(hdev);
1056 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1058 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1060 if (status) {
1061 if (conn && conn->state == BT_CONNECT) {
1062 if (status != 0x0c || conn->attempt > 2) {
1063 conn->state = BT_CLOSED;
1064 hci_proto_connect_cfm(conn, status);
1065 hci_conn_del(conn);
1066 } else
1067 conn->state = BT_CONNECT2;
1069 } else {
1070 if (!conn) {
1071 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1072 if (conn) {
1073 conn->out = true;
1074 conn->link_mode |= HCI_LM_MASTER;
1075 } else
1076 BT_ERR("No memory for new connection");
1080 hci_dev_unlock(hdev);
1083 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1085 struct hci_cp_add_sco *cp;
1086 struct hci_conn *acl, *sco;
1087 __u16 handle;
1089 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1091 if (!status)
1092 return;
1094 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1095 if (!cp)
1096 return;
1098 handle = __le16_to_cpu(cp->handle);
1100 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1102 hci_dev_lock(hdev);
1104 acl = hci_conn_hash_lookup_handle(hdev, handle);
1105 if (acl) {
1106 sco = acl->link;
1107 if (sco) {
1108 sco->state = BT_CLOSED;
1110 hci_proto_connect_cfm(sco, status);
1111 hci_conn_del(sco);
1115 hci_dev_unlock(hdev);
1118 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1120 struct hci_cp_auth_requested *cp;
1121 struct hci_conn *conn;
1123 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1125 if (!status)
1126 return;
1128 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1129 if (!cp)
1130 return;
1132 hci_dev_lock(hdev);
1134 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1135 if (conn) {
1136 if (conn->state == BT_CONFIG) {
1137 hci_proto_connect_cfm(conn, status);
1138 hci_conn_drop(conn);
1142 hci_dev_unlock(hdev);
1145 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1147 struct hci_cp_set_conn_encrypt *cp;
1148 struct hci_conn *conn;
1150 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1152 if (!status)
1153 return;
1155 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1156 if (!cp)
1157 return;
1159 hci_dev_lock(hdev);
1161 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1162 if (conn) {
1163 if (conn->state == BT_CONFIG) {
1164 hci_proto_connect_cfm(conn, status);
1165 hci_conn_drop(conn);
1169 hci_dev_unlock(hdev);
1172 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1173 struct hci_conn *conn)
1175 if (conn->state != BT_CONFIG || !conn->out)
1176 return 0;
1178 if (conn->pending_sec_level == BT_SECURITY_SDP)
1179 return 0;
1181 /* Only request authentication for SSP connections or non-SSP
1182 * devices with sec_level HIGH or if MITM protection is requested */
1183 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1184 conn->pending_sec_level != BT_SECURITY_HIGH)
1185 return 0;
1187 return 1;
1190 static int hci_resolve_name(struct hci_dev *hdev,
1191 struct inquiry_entry *e)
1193 struct hci_cp_remote_name_req cp;
1195 memset(&cp, 0, sizeof(cp));
1197 bacpy(&cp.bdaddr, &e->data.bdaddr);
1198 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1199 cp.pscan_mode = e->data.pscan_mode;
1200 cp.clock_offset = e->data.clock_offset;
1202 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1205 static bool hci_resolve_next_name(struct hci_dev *hdev)
1207 struct discovery_state *discov = &hdev->discovery;
1208 struct inquiry_entry *e;
1210 if (list_empty(&discov->resolve))
1211 return false;
1213 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1214 if (!e)
1215 return false;
1217 if (hci_resolve_name(hdev, e) == 0) {
1218 e->name_state = NAME_PENDING;
1219 return true;
1222 return false;
1225 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1226 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1228 struct discovery_state *discov = &hdev->discovery;
1229 struct inquiry_entry *e;
1231 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1232 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1233 name_len, conn->dev_class);
1235 if (discov->state == DISCOVERY_STOPPED)
1236 return;
1238 if (discov->state == DISCOVERY_STOPPING)
1239 goto discov_complete;
1241 if (discov->state != DISCOVERY_RESOLVING)
1242 return;
1244 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1245 /* If the device was not found in the list of devices whose names
1246 * are pending, there is no need to continue resolving the next name,
1247 * as that will be done upon receiving another Remote Name Request
1248 * Complete event. */
1249 if (!e)
1250 return;
1252 list_del(&e->list);
1253 if (name) {
1254 e->name_state = NAME_KNOWN;
1255 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1256 e->data.rssi, name, name_len);
1257 } else {
1258 e->name_state = NAME_NOT_KNOWN;
1261 if (hci_resolve_next_name(hdev))
1262 return;
1264 discov_complete:
1265 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1268 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1270 struct hci_cp_remote_name_req *cp;
1271 struct hci_conn *conn;
1273 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1275 /* If successful, wait for the Remote Name Request Complete event
1276 * before checking whether authentication is needed. */
1277 if (!status)
1278 return;
1280 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1281 if (!cp)
1282 return;
1284 hci_dev_lock(hdev);
1286 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1288 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1289 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1291 if (!conn)
1292 goto unlock;
1294 if (!hci_outgoing_auth_needed(hdev, conn))
1295 goto unlock;
1297 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1298 struct hci_cp_auth_requested cp;
1299 cp.handle = __cpu_to_le16(conn->handle);
1300 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1303 unlock:
1304 hci_dev_unlock(hdev);
1307 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1309 struct hci_cp_read_remote_features *cp;
1310 struct hci_conn *conn;
1312 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1314 if (!status)
1315 return;
1317 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1318 if (!cp)
1319 return;
1321 hci_dev_lock(hdev);
1323 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1324 if (conn) {
1325 if (conn->state == BT_CONFIG) {
1326 hci_proto_connect_cfm(conn, status);
1327 hci_conn_drop(conn);
1331 hci_dev_unlock(hdev);
1334 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1336 struct hci_cp_read_remote_ext_features *cp;
1337 struct hci_conn *conn;
1339 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1341 if (!status)
1342 return;
1344 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1345 if (!cp)
1346 return;
1348 hci_dev_lock(hdev);
1350 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1351 if (conn) {
1352 if (conn->state == BT_CONFIG) {
1353 hci_proto_connect_cfm(conn, status);
1354 hci_conn_drop(conn);
1358 hci_dev_unlock(hdev);
1361 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1363 struct hci_cp_setup_sync_conn *cp;
1364 struct hci_conn *acl, *sco;
1365 __u16 handle;
1367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1369 if (!status)
1370 return;
1372 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1373 if (!cp)
1374 return;
1376 handle = __le16_to_cpu(cp->handle);
1378 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1380 hci_dev_lock(hdev);
1382 acl = hci_conn_hash_lookup_handle(hdev, handle);
1383 if (acl) {
1384 sco = acl->link;
1385 if (sco) {
1386 sco->state = BT_CLOSED;
1388 hci_proto_connect_cfm(sco, status);
1389 hci_conn_del(sco);
1393 hci_dev_unlock(hdev);
1396 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1398 struct hci_cp_sniff_mode *cp;
1399 struct hci_conn *conn;
1401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1403 if (!status)
1404 return;
1406 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1407 if (!cp)
1408 return;
1410 hci_dev_lock(hdev);
1412 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1413 if (conn) {
1414 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1416 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1417 hci_sco_setup(conn, status);
1420 hci_dev_unlock(hdev);
1423 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1425 struct hci_cp_exit_sniff_mode *cp;
1426 struct hci_conn *conn;
1428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1430 if (!status)
1431 return;
1433 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1434 if (!cp)
1435 return;
1437 hci_dev_lock(hdev);
1439 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1440 if (conn) {
1441 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1443 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1444 hci_sco_setup(conn, status);
1447 hci_dev_unlock(hdev);
1450 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1452 struct hci_cp_disconnect *cp;
1453 struct hci_conn *conn;
1455 if (!status)
1456 return;
1458 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1459 if (!cp)
1460 return;
1462 hci_dev_lock(hdev);
1464 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1465 if (conn)
1466 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1467 conn->dst_type, status);
1469 hci_dev_unlock(hdev);
1472 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1474 struct hci_conn *conn;
1476 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1478 if (status) {
1479 hci_dev_lock(hdev);
1481 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1482 if (!conn) {
1483 hci_dev_unlock(hdev);
1484 return;
1487 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1489 conn->state = BT_CLOSED;
1490 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1491 conn->dst_type, status);
1492 hci_proto_connect_cfm(conn, status);
1493 hci_conn_del(conn);
1495 hci_dev_unlock(hdev);
1499 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1501 struct hci_cp_create_phy_link *cp;
1503 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1505 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1506 if (!cp)
1507 return;
1509 hci_dev_lock(hdev);
1511 if (status) {
1512 struct hci_conn *hcon;
1514 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1515 if (hcon)
1516 hci_conn_del(hcon);
1517 } else {
1518 amp_write_remote_assoc(hdev, cp->phy_handle);
1521 hci_dev_unlock(hdev);
1524 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1526 struct hci_cp_accept_phy_link *cp;
1528 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1530 if (status)
1531 return;
1533 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1534 if (!cp)
1535 return;
1537 amp_write_remote_assoc(hdev, cp->phy_handle);
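/*
 * The handlers below process HCI events received from the controller:
 * inquiry results, connection life-cycle events, and the Command
 * Complete/Command Status events that dispatch back to the hci_cc_*
 * and hci_cs_* helpers above.
 */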
1540 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1542 __u8 status = *((__u8 *) skb->data);
1543 struct discovery_state *discov = &hdev->discovery;
1544 struct inquiry_entry *e;
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1548 hci_conn_check_pending(hdev);
1550 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1551 return;
1553 smp_mb__after_clear_bit(); /* barrier required by wake_up_bit() */
1554 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1556 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1557 return;
1559 hci_dev_lock(hdev);
1561 if (discov->state != DISCOVERY_FINDING)
1562 goto unlock;
1564 if (list_empty(&discov->resolve)) {
1565 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1566 goto unlock;
1569 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1570 if (e && hci_resolve_name(hdev, e) == 0) {
1571 e->name_state = NAME_PENDING;
1572 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1573 } else {
1574 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1577 unlock:
1578 hci_dev_unlock(hdev);
1581 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1583 struct inquiry_data data;
1584 struct inquiry_info *info = (void *) (skb->data + 1);
1585 int num_rsp = *((__u8 *) skb->data);
1587 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1589 if (!num_rsp)
1590 return;
1592 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1593 return;
1595 hci_dev_lock(hdev);
1597 for (; num_rsp; num_rsp--, info++) {
1598 bool name_known, ssp;
1600 bacpy(&data.bdaddr, &info->bdaddr);
1601 data.pscan_rep_mode = info->pscan_rep_mode;
1602 data.pscan_period_mode = info->pscan_period_mode;
1603 data.pscan_mode = info->pscan_mode;
1604 memcpy(data.dev_class, info->dev_class, 3);
1605 data.clock_offset = info->clock_offset;
1606 data.rssi = 0x00;
1607 data.ssp_mode = 0x00;
1609 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1610 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1611 info->dev_class, 0, !name_known, ssp, NULL,
1615 hci_dev_unlock(hdev);
1618 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1620 struct hci_ev_conn_complete *ev = (void *) skb->data;
1621 struct hci_conn *conn;
1623 BT_DBG("%s", hdev->name);
1625 hci_dev_lock(hdev);
1627 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1628 if (!conn) {
1629 if (ev->link_type != SCO_LINK)
1630 goto unlock;
1632 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1633 if (!conn)
1634 goto unlock;
1636 conn->type = SCO_LINK;
1639 if (!ev->status) {
1640 conn->handle = __le16_to_cpu(ev->handle);
1642 if (conn->type == ACL_LINK) {
1643 conn->state = BT_CONFIG;
1644 hci_conn_hold(conn);
1646 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1647 !hci_find_link_key(hdev, &ev->bdaddr))
1648 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1649 else
1650 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1651 } else
1652 conn->state = BT_CONNECTED;
1654 hci_conn_add_sysfs(conn);
1656 if (test_bit(HCI_AUTH, &hdev->flags))
1657 conn->link_mode |= HCI_LM_AUTH;
1659 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1660 conn->link_mode |= HCI_LM_ENCRYPT;
1662 /* Get remote features */
1663 if (conn->type == ACL_LINK) {
1664 struct hci_cp_read_remote_features cp;
1665 cp.handle = ev->handle;
1666 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1667 sizeof(cp), &cp);
1670 /* Set packet type for incoming connection */
1671 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1672 struct hci_cp_change_conn_ptype cp;
1673 cp.handle = ev->handle;
1674 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1675 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1676 &cp);
1678 } else {
1679 conn->state = BT_CLOSED;
1680 if (conn->type == ACL_LINK)
1681 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1682 conn->dst_type, ev->status);
1685 if (conn->type == ACL_LINK)
1686 hci_sco_setup(conn, ev->status);
1688 if (ev->status) {
1689 hci_proto_connect_cfm(conn, ev->status);
1690 hci_conn_del(conn);
1691 } else if (ev->link_type != ACL_LINK)
1692 hci_proto_connect_cfm(conn, ev->status);
1694 unlock:
1695 hci_dev_unlock(hdev);
1697 hci_conn_check_pending(hdev);
1700 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1702 struct hci_ev_conn_request *ev = (void *) skb->data;
1703 int mask = hdev->link_mode;
1704 __u8 flags = 0;
1706 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1707 ev->link_type);
1709 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1710 &flags);
1712 if ((mask & HCI_LM_ACCEPT) &&
1713 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1714 /* Connection accepted */
1715 struct inquiry_entry *ie;
1716 struct hci_conn *conn;
1718 hci_dev_lock(hdev);
1720 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1721 if (ie)
1722 memcpy(ie->data.dev_class, ev->dev_class, 3);
1724 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1725 &ev->bdaddr);
1726 if (!conn) {
1727 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1728 if (!conn) {
1729 BT_ERR("No memory for new connection");
1730 hci_dev_unlock(hdev);
1731 return;
1735 memcpy(conn->dev_class, ev->dev_class, 3);
1737 hci_dev_unlock(hdev);
1739 if (ev->link_type == ACL_LINK ||
1740 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1741 struct hci_cp_accept_conn_req cp;
1742 conn->state = BT_CONNECT;
1744 bacpy(&cp.bdaddr, &ev->bdaddr);
1746 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1747 cp.role = 0x00; /* Become master */
1748 else
1749 cp.role = 0x01; /* Remain slave */
1751 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1752 &cp);
1753 } else if (!(flags & HCI_PROTO_DEFER)) {
1754 struct hci_cp_accept_sync_conn_req cp;
1755 conn->state = BT_CONNECT;
1757 bacpy(&cp.bdaddr, &ev->bdaddr);
1758 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1760 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1761 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1762 cp.max_latency = __constant_cpu_to_le16(0xffff);
1763 cp.content_format = cpu_to_le16(hdev->voice_setting);
1764 cp.retrans_effort = 0xff;
1766 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1767 sizeof(cp), &cp);
1768 } else {
1769 conn->state = BT_CONNECT2;
1770 hci_proto_connect_cfm(conn, 0);
1772 } else {
1773 /* Connection rejected */
1774 struct hci_cp_reject_conn_req cp;
1776 bacpy(&cp.bdaddr, &ev->bdaddr);
1777 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1778 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
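/* Map an HCI disconnect reason code to the mgmt-layer disconnect
 * reason that is reported to userspace.
 */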
1782 static u8 hci_to_mgmt_reason(u8 err)
1784 switch (err) {
1785 case HCI_ERROR_CONNECTION_TIMEOUT:
1786 return MGMT_DEV_DISCONN_TIMEOUT;
1787 case HCI_ERROR_REMOTE_USER_TERM:
1788 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1789 case HCI_ERROR_REMOTE_POWER_OFF:
1790 return MGMT_DEV_DISCONN_REMOTE;
1791 case HCI_ERROR_LOCAL_HOST_TERM:
1792 return MGMT_DEV_DISCONN_LOCAL_HOST;
1793 default:
1794 return MGMT_DEV_DISCONN_UNKNOWN;
1798 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1800 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1801 struct hci_conn *conn;
1803 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1805 hci_dev_lock(hdev);
1807 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1808 if (!conn)
1809 goto unlock;
1811 if (ev->status == 0)
1812 conn->state = BT_CLOSED;
1814 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1815 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1816 if (ev->status) {
1817 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1818 conn->dst_type, ev->status);
1819 } else {
1820 u8 reason = hci_to_mgmt_reason(ev->reason);
1822 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1823 conn->dst_type, reason);
1827 if (ev->status == 0) {
1828 if (conn->type == ACL_LINK && conn->flush_key)
1829 hci_remove_link_key(hdev, &conn->dst);
1830 hci_proto_disconn_cfm(conn, ev->reason);
1831 hci_conn_del(conn);
1834 unlock:
1835 hci_dev_unlock(hdev);
1838 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1840 struct hci_ev_auth_complete *ev = (void *) skb->data;
1841 struct hci_conn *conn;
1843 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1845 hci_dev_lock(hdev);
1847 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1848 if (!conn)
1849 goto unlock;
1851 if (!ev->status) {
1852 if (!hci_conn_ssp_enabled(conn) &&
1853 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1854 BT_INFO("re-auth of legacy device is not possible.");
1855 } else {
1856 conn->link_mode |= HCI_LM_AUTH;
1857 conn->sec_level = conn->pending_sec_level;
1859 } else {
1860 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1861 ev->status);
1864 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1865 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1867 if (conn->state == BT_CONFIG) {
1868 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1869 struct hci_cp_set_conn_encrypt cp;
1870 cp.handle = ev->handle;
1871 cp.encrypt = 0x01;
1872 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1873 &cp);
1874 } else {
1875 conn->state = BT_CONNECTED;
1876 hci_proto_connect_cfm(conn, ev->status);
1877 hci_conn_drop(conn);
1879 } else {
1880 hci_auth_cfm(conn, ev->status);
1882 hci_conn_hold(conn);
1883 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1884 hci_conn_drop(conn);
1887 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1888 if (!ev->status) {
1889 struct hci_cp_set_conn_encrypt cp;
1890 cp.handle = ev->handle;
1891 cp.encrypt = 0x01;
1892 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1893 &cp);
1894 } else {
1895 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1896 hci_encrypt_cfm(conn, ev->status, 0x00);
1900 unlock:
1901 hci_dev_unlock(hdev);
1904 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1906 struct hci_ev_remote_name *ev = (void *) skb->data;
1907 struct hci_conn *conn;
1909 BT_DBG("%s", hdev->name);
1911 hci_conn_check_pending(hdev);
1913 hci_dev_lock(hdev);
1915 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1917 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1918 goto check_auth;
1920 if (ev->status == 0)
1921 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1922 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1923 else
1924 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1926 check_auth:
1927 if (!conn)
1928 goto unlock;
1930 if (!hci_outgoing_auth_needed(hdev, conn))
1931 goto unlock;
1933 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1934 struct hci_cp_auth_requested cp;
1935 cp.handle = __cpu_to_le16(conn->handle);
1936 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1939 unlock:
1940 hci_dev_unlock(hdev);
1943 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1945 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1946 struct hci_conn *conn;
1948 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1950 hci_dev_lock(hdev);
1952 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1953 if (conn) {
1954 if (!ev->status) {
1955 if (ev->encrypt) {
1956 /* Encryption implies authentication */
1957 conn->link_mode |= HCI_LM_AUTH;
1958 conn->link_mode |= HCI_LM_ENCRYPT;
1959 conn->sec_level = conn->pending_sec_level;
1960 } else
1961 conn->link_mode &= ~HCI_LM_ENCRYPT;
1964 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1966 if (ev->status && conn->state == BT_CONNECTED) {
1967 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1968 hci_conn_drop(conn);
1969 goto unlock;
1972 if (conn->state == BT_CONFIG) {
1973 if (!ev->status)
1974 conn->state = BT_CONNECTED;
1976 hci_proto_connect_cfm(conn, ev->status);
1977 hci_conn_drop(conn);
1978 } else
1979 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1982 unlock:
1983 hci_dev_unlock(hdev);
1986 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
1987 struct sk_buff *skb)
1989 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1990 struct hci_conn *conn;
1992 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1994 hci_dev_lock(hdev);
1996 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1997 if (conn) {
1998 if (!ev->status)
1999 conn->link_mode |= HCI_LM_SECURE;
2001 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2003 hci_key_change_cfm(conn, ev->status);
2006 hci_dev_unlock(hdev);
2009 static void hci_remote_features_evt(struct hci_dev *hdev,
2010 struct sk_buff *skb)
2012 struct hci_ev_remote_features *ev = (void *) skb->data;
2013 struct hci_conn *conn;
2015 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2017 hci_dev_lock(hdev);
2019 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2020 if (!conn)
2021 goto unlock;
2023 if (!ev->status)
2024 memcpy(conn->features[0], ev->features, 8);
2026 if (conn->state != BT_CONFIG)
2027 goto unlock;
2029 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2030 struct hci_cp_read_remote_ext_features cp;
2031 cp.handle = ev->handle;
2032 cp.page = 0x01;
2033 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2034 sizeof(cp), &cp);
2035 goto unlock;
2038 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2039 struct hci_cp_remote_name_req cp;
2040 memset(&cp, 0, sizeof(cp));
2041 bacpy(&cp.bdaddr, &conn->dst);
2042 cp.pscan_rep_mode = 0x02;
2043 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2044 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2045 mgmt_device_connected(hdev, &conn->dst, conn->type,
2046 conn->dst_type, 0, NULL, 0,
2047 conn->dev_class);
2049 if (!hci_outgoing_auth_needed(hdev, conn)) {
2050 conn->state = BT_CONNECTED;
2051 hci_proto_connect_cfm(conn, ev->status);
2052 hci_conn_drop(conn);
2055 unlock:
2056 hci_dev_unlock(hdev);
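/*
 * Command Complete event: strip the event header, dispatch to the
 * matching hci_cc_* handler based on the opcode, and re-credit the
 * command queue when the controller indicates it can accept further
 * commands (ev->ncmd).
 */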
2059 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2061 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2062 u8 status = skb->data[sizeof(*ev)];
2063 __u16 opcode;
2065 skb_pull(skb, sizeof(*ev));
2067 opcode = __le16_to_cpu(ev->opcode);
2069 switch (opcode) {
2070 case HCI_OP_INQUIRY_CANCEL:
2071 hci_cc_inquiry_cancel(hdev, skb);
2072 break;
2074 case HCI_OP_PERIODIC_INQ:
2075 hci_cc_periodic_inq(hdev, skb);
2076 break;
2078 case HCI_OP_EXIT_PERIODIC_INQ:
2079 hci_cc_exit_periodic_inq(hdev, skb);
2080 break;
2082 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2083 hci_cc_remote_name_req_cancel(hdev, skb);
2084 break;
2086 case HCI_OP_ROLE_DISCOVERY:
2087 hci_cc_role_discovery(hdev, skb);
2088 break;
2090 case HCI_OP_READ_LINK_POLICY:
2091 hci_cc_read_link_policy(hdev, skb);
2092 break;
2094 case HCI_OP_WRITE_LINK_POLICY:
2095 hci_cc_write_link_policy(hdev, skb);
2096 break;
2098 case HCI_OP_READ_DEF_LINK_POLICY:
2099 hci_cc_read_def_link_policy(hdev, skb);
2100 break;
2102 case HCI_OP_WRITE_DEF_LINK_POLICY:
2103 hci_cc_write_def_link_policy(hdev, skb);
2104 break;
2106 case HCI_OP_RESET:
2107 hci_cc_reset(hdev, skb);
2108 break;
2110 case HCI_OP_WRITE_LOCAL_NAME:
2111 hci_cc_write_local_name(hdev, skb);
2112 break;
2114 case HCI_OP_READ_LOCAL_NAME:
2115 hci_cc_read_local_name(hdev, skb);
2116 break;
2118 case HCI_OP_WRITE_AUTH_ENABLE:
2119 hci_cc_write_auth_enable(hdev, skb);
2120 break;
2122 case HCI_OP_WRITE_ENCRYPT_MODE:
2123 hci_cc_write_encrypt_mode(hdev, skb);
2124 break;
2126 case HCI_OP_WRITE_SCAN_ENABLE:
2127 hci_cc_write_scan_enable(hdev, skb);
2128 break;
2130 case HCI_OP_READ_CLASS_OF_DEV:
2131 hci_cc_read_class_of_dev(hdev, skb);
2132 break;
2134 case HCI_OP_WRITE_CLASS_OF_DEV:
2135 hci_cc_write_class_of_dev(hdev, skb);
2136 break;
2138 case HCI_OP_READ_VOICE_SETTING:
2139 hci_cc_read_voice_setting(hdev, skb);
2140 break;
2142 case HCI_OP_WRITE_VOICE_SETTING:
2143 hci_cc_write_voice_setting(hdev, skb);
2144 break;
2146 case HCI_OP_WRITE_SSP_MODE:
2147 hci_cc_write_ssp_mode(hdev, skb);
2148 break;
2150 case HCI_OP_READ_LOCAL_VERSION:
2151 hci_cc_read_local_version(hdev, skb);
2152 break;
2154 case HCI_OP_READ_LOCAL_COMMANDS:
2155 hci_cc_read_local_commands(hdev, skb);
2156 break;
2158 case HCI_OP_READ_LOCAL_FEATURES:
2159 hci_cc_read_local_features(hdev, skb);
2160 break;
2162 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2163 hci_cc_read_local_ext_features(hdev, skb);
2164 break;
2166 case HCI_OP_READ_BUFFER_SIZE:
2167 hci_cc_read_buffer_size(hdev, skb);
2168 break;
2170 case HCI_OP_READ_BD_ADDR:
2171 hci_cc_read_bd_addr(hdev, skb);
2172 break;
2174 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2175 hci_cc_read_page_scan_activity(hdev, skb);
2176 break;
2178 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2179 hci_cc_write_page_scan_activity(hdev, skb);
2180 break;
2182 case HCI_OP_READ_PAGE_SCAN_TYPE:
2183 hci_cc_read_page_scan_type(hdev, skb);
2184 break;
2186 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2187 hci_cc_write_page_scan_type(hdev, skb);
2188 break;
2190 case HCI_OP_READ_DATA_BLOCK_SIZE:
2191 hci_cc_read_data_block_size(hdev, skb);
2192 break;
2194 case HCI_OP_READ_FLOW_CONTROL_MODE:
2195 hci_cc_read_flow_control_mode(hdev, skb);
2196 break;
2198 case HCI_OP_READ_LOCAL_AMP_INFO:
2199 hci_cc_read_local_amp_info(hdev, skb);
2200 break;
2202 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2203 hci_cc_read_local_amp_assoc(hdev, skb);
2204 break;
2206 case HCI_OP_READ_INQ_RSP_TX_POWER:
2207 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2208 break;
2210 case HCI_OP_PIN_CODE_REPLY:
2211 hci_cc_pin_code_reply(hdev, skb);
2212 break;
2214 case HCI_OP_PIN_CODE_NEG_REPLY:
2215 hci_cc_pin_code_neg_reply(hdev, skb);
2216 break;
2218 case HCI_OP_READ_LOCAL_OOB_DATA:
2219 hci_cc_read_local_oob_data_reply(hdev, skb);
2220 break;
2222 case HCI_OP_LE_READ_BUFFER_SIZE:
2223 hci_cc_le_read_buffer_size(hdev, skb);
2224 break;
2226 case HCI_OP_LE_READ_LOCAL_FEATURES:
2227 hci_cc_le_read_local_features(hdev, skb);
2228 break;
2230 case HCI_OP_LE_READ_ADV_TX_POWER:
2231 hci_cc_le_read_adv_tx_power(hdev, skb);
2232 break;
2234 case HCI_OP_USER_CONFIRM_REPLY:
2235 hci_cc_user_confirm_reply(hdev, skb);
2236 break;
2238 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2239 hci_cc_user_confirm_neg_reply(hdev, skb);
2240 break;
2242 case HCI_OP_USER_PASSKEY_REPLY:
2243 hci_cc_user_passkey_reply(hdev, skb);
2244 break;
2246 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2247 hci_cc_user_passkey_neg_reply(hdev, skb);
2248 break;
2250 case HCI_OP_LE_SET_ADV_ENABLE:
2251 hci_cc_le_set_adv_enable(hdev, skb);
2252 break;
2254 case HCI_OP_LE_SET_SCAN_ENABLE:
2255 hci_cc_le_set_scan_enable(hdev, skb);
2256 break;
2258 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2259 hci_cc_le_read_white_list_size(hdev, skb);
2260 break;
2262 case HCI_OP_LE_READ_SUPPORTED_STATES:
2263 hci_cc_le_read_supported_states(hdev, skb);
2264 break;
2266 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2267 hci_cc_write_le_host_supported(hdev, skb);
2268 break;
2270 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2271 hci_cc_write_remote_amp_assoc(hdev, skb);
2272 break;
2274 default:
2275 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2276 break;
2279 if (opcode != HCI_OP_NOP)
2280 del_timer(&hdev->cmd_timer);
2282 hci_req_cmd_complete(hdev, opcode, status);
2284 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2285 atomic_set(&hdev->cmd_cnt, 1);
2286 if (!skb_queue_empty(&hdev->cmd_q))
2287 queue_work(hdev->workqueue, &hdev->cmd_work);
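/*
 * Command Status event: same dispatch pattern as Command Complete, but
 * the request is only completed here if the command failed or no
 * follow-up event is expected for it.
 */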
2291 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2293 struct hci_ev_cmd_status *ev = (void *) skb->data;
2294 __u16 opcode;
2296 skb_pull(skb, sizeof(*ev));
2298 opcode = __le16_to_cpu(ev->opcode);
2300 switch (opcode) {
2301 case HCI_OP_INQUIRY:
2302 hci_cs_inquiry(hdev, ev->status);
2303 break;
2305 case HCI_OP_CREATE_CONN:
2306 hci_cs_create_conn(hdev, ev->status);
2307 break;
2309 case HCI_OP_ADD_SCO:
2310 hci_cs_add_sco(hdev, ev->status);
2311 break;
2313 case HCI_OP_AUTH_REQUESTED:
2314 hci_cs_auth_requested(hdev, ev->status);
2315 break;
2317 case HCI_OP_SET_CONN_ENCRYPT:
2318 hci_cs_set_conn_encrypt(hdev, ev->status);
2319 break;
2321 case HCI_OP_REMOTE_NAME_REQ:
2322 hci_cs_remote_name_req(hdev, ev->status);
2323 break;
2325 case HCI_OP_READ_REMOTE_FEATURES:
2326 hci_cs_read_remote_features(hdev, ev->status);
2327 break;
2329 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2330 hci_cs_read_remote_ext_features(hdev, ev->status);
2331 break;
2333 case HCI_OP_SETUP_SYNC_CONN:
2334 hci_cs_setup_sync_conn(hdev, ev->status);
2335 break;
2337 case HCI_OP_SNIFF_MODE:
2338 hci_cs_sniff_mode(hdev, ev->status);
2339 break;
2341 case HCI_OP_EXIT_SNIFF_MODE:
2342 hci_cs_exit_sniff_mode(hdev, ev->status);
2343 break;
2345 case HCI_OP_DISCONNECT:
2346 hci_cs_disconnect(hdev, ev->status);
2347 break;
2349 case HCI_OP_LE_CREATE_CONN:
2350 hci_cs_le_create_conn(hdev, ev->status);
2351 break;
2353 case HCI_OP_CREATE_PHY_LINK:
2354 hci_cs_create_phylink(hdev, ev->status);
2355 break;
2357 case HCI_OP_ACCEPT_PHY_LINK:
2358 hci_cs_accept_phylink(hdev, ev->status);
2359 break;
2361 default:
2362 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2363 break;
2366 if (opcode != HCI_OP_NOP)
2367 del_timer(&hdev->cmd_timer);
2369 if (ev->status ||
2370 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2371 hci_req_cmd_complete(hdev, opcode, ev->status);
2373 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2374 atomic_set(&hdev->cmd_cnt, 1);
2375 if (!skb_queue_empty(&hdev->cmd_q))
2376 queue_work(hdev->workqueue, &hdev->cmd_work);
2380 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2382 struct hci_ev_role_change *ev = (void *) skb->data;
2383 struct hci_conn *conn;
2385 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2387 hci_dev_lock(hdev);
2389 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2390 if (conn) {
2391 if (!ev->status) {
2392 if (ev->role)
2393 conn->link_mode &= ~HCI_LM_MASTER;
2394 else
2395 conn->link_mode |= HCI_LM_MASTER;
2398 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2400 hci_role_switch_cfm(conn, ev->status, ev->role);
2403 hci_dev_unlock(hdev);
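/* Number of Completed Packets: the controller returns transmit credits per
 * connection handle. Credits are added back to the ACL, LE or SCO counters
 * (capped at the advertised buffer sizes) and the TX work is kicked to
 * flush any queued frames. */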
2406 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2408 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2409 int i;
2411 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2412 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2413 return;
2416 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2417 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2418 BT_DBG("%s bad parameters", hdev->name);
2419 return;
2422 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2424 for (i = 0; i < ev->num_hndl; i++) {
2425 struct hci_comp_pkts_info *info = &ev->handles[i];
2426 struct hci_conn *conn;
2427 __u16 handle, count;
2429 handle = __le16_to_cpu(info->handle);
2430 count = __le16_to_cpu(info->count);
2432 conn = hci_conn_hash_lookup_handle(hdev, handle);
2433 if (!conn)
2434 continue;
2436 conn->sent -= count;
2438 switch (conn->type) {
2439 case ACL_LINK:
2440 hdev->acl_cnt += count;
2441 if (hdev->acl_cnt > hdev->acl_pkts)
2442 hdev->acl_cnt = hdev->acl_pkts;
2443 break;
2445 case LE_LINK:
2446 if (hdev->le_pkts) {
2447 hdev->le_cnt += count;
2448 if (hdev->le_cnt > hdev->le_pkts)
2449 hdev->le_cnt = hdev->le_pkts;
2450 } else {
2451 hdev->acl_cnt += count;
2452 if (hdev->acl_cnt > hdev->acl_pkts)
2453 hdev->acl_cnt = hdev->acl_pkts;
2455 break;
2457 case SCO_LINK:
2458 hdev->sco_cnt += count;
2459 if (hdev->sco_cnt > hdev->sco_pkts)
2460 hdev->sco_cnt = hdev->sco_pkts;
2461 break;
2463 default:
2464 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2465 break;
2469 queue_work(hdev->workqueue, &hdev->tx_work);
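/* On a BR/EDR controller the handle identifies an ACL/SCO connection; on an
 * AMP controller it identifies a logical channel, so resolve it to the
 * owning hci_conn via the channel lookup. */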
2472 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2473 __u16 handle)
2475 struct hci_chan *chan;
2477 switch (hdev->dev_type) {
2478 case HCI_BREDR:
2479 return hci_conn_hash_lookup_handle(hdev, handle);
2480 case HCI_AMP:
2481 chan = hci_chan_lookup_handle(hdev, handle);
2482 if (chan)
2483 return chan->conn;
2484 break;
2485 default:
2486 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2487 break;
2490 return NULL;
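/* Number of Completed Data Blocks: block-based flow control as used by AMP
 * controllers. Returned blocks are credited back to hdev->block_cnt (capped
 * at num_blocks) before rescheduling the TX work. */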
2493 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2495 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2496 int i;
2498 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2499 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2500 return;
2503 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2504 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2505 BT_DBG("%s bad parameters", hdev->name);
2506 return;
2509 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2510 ev->num_hndl);
2512 for (i = 0; i < ev->num_hndl; i++) {
2513 struct hci_comp_blocks_info *info = &ev->handles[i];
2514 struct hci_conn *conn = NULL;
2515 __u16 handle, block_count;
2517 handle = __le16_to_cpu(info->handle);
2518 block_count = __le16_to_cpu(info->blocks);
2520 conn = __hci_conn_lookup_handle(hdev, handle);
2521 if (!conn)
2522 continue;
2524 conn->sent -= block_count;
2526 switch (conn->type) {
2527 case ACL_LINK:
2528 case AMP_LINK:
2529 hdev->block_cnt += block_count;
2530 if (hdev->block_cnt > hdev->num_blocks)
2531 hdev->block_cnt = hdev->num_blocks;
2532 break;
2534 default:
2535 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2536 break;
2540 queue_work(hdev->workqueue, &hdev->tx_work);
2543 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2545 struct hci_ev_mode_change *ev = (void *) skb->data;
2546 struct hci_conn *conn;
2548 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2550 hci_dev_lock(hdev);
2552 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2553 if (conn) {
2554 conn->mode = ev->mode;
2555 conn->interval = __le16_to_cpu(ev->interval);
2557 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2558 &conn->flags)) {
2559 if (conn->mode == HCI_CM_ACTIVE)
2560 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2561 else
2562 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2565 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2566 hci_sco_setup(conn, ev->status);
2569 hci_dev_unlock(hdev);
2572 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2574 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2575 struct hci_conn *conn;
2577 BT_DBG("%s", hdev->name);
2579 hci_dev_lock(hdev);
2581 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2582 if (!conn)
2583 goto unlock;
2585 if (conn->state == BT_CONNECTED) {
2586 hci_conn_hold(conn);
2587 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2588 hci_conn_drop(conn);
2591 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2592 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2593 sizeof(ev->bdaddr), &ev->bdaddr);
2594 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2595 u8 secure;
2597 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2598 secure = 1;
2599 else
2600 secure = 0;
2602 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2605 unlock:
2606 hci_dev_unlock(hdev);
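/* Link Key Request: look up a stored key for the peer and reply with it,
 * unless policy forbids its use (debug keys disabled, unauthenticated keys
 * when MITM is required, or short combination keys for high security).
 * Otherwise send a negative reply so the controller falls back to pairing. */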
2609 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611 struct hci_ev_link_key_req *ev = (void *) skb->data;
2612 struct hci_cp_link_key_reply cp;
2613 struct hci_conn *conn;
2614 struct link_key *key;
2616 BT_DBG("%s", hdev->name);
2618 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2619 return;
2621 hci_dev_lock(hdev);
2623 key = hci_find_link_key(hdev, &ev->bdaddr);
2624 if (!key) {
2625 BT_DBG("%s link key not found for %pMR", hdev->name,
2626 &ev->bdaddr);
2627 goto not_found;
2630 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2631 &ev->bdaddr);
2633 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2634 key->type == HCI_LK_DEBUG_COMBINATION) {
2635 BT_DBG("%s ignoring debug key", hdev->name);
2636 goto not_found;
2639 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2640 if (conn) {
2641 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2642 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2643 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2644 goto not_found;
2647 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2648 conn->pending_sec_level == BT_SECURITY_HIGH) {
2649 BT_DBG("%s ignoring key unauthenticated for high security",
2650 hdev->name);
2651 goto not_found;
2654 conn->key_type = key->type;
2655 conn->pin_length = key->pin_len;
2658 bacpy(&cp.bdaddr, &ev->bdaddr);
2659 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2661 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2663 hci_dev_unlock(hdev);
2665 return;
2667 not_found:
2668 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2669 hci_dev_unlock(hdev);
2672 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2674 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2675 struct hci_conn *conn;
2676 u8 pin_len = 0;
2678 BT_DBG("%s", hdev->name);
2680 hci_dev_lock(hdev);
2682 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2683 if (conn) {
2684 hci_conn_hold(conn);
2685 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2686 pin_len = conn->pin_length;
2688 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2689 conn->key_type = ev->key_type;
2691 hci_conn_drop(conn);
2694 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2695 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2696 ev->key_type, pin_len);
2698 hci_dev_unlock(hdev);
2701 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2703 struct hci_ev_clock_offset *ev = (void *) skb->data;
2704 struct hci_conn *conn;
2706 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2708 hci_dev_lock(hdev);
2710 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2711 if (conn && !ev->status) {
2712 struct inquiry_entry *ie;
2714 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2715 if (ie) {
2716 ie->data.clock_offset = ev->clock_offset;
2717 ie->timestamp = jiffies;
2721 hci_dev_unlock(hdev);
2724 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2726 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2727 struct hci_conn *conn;
2729 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2731 hci_dev_lock(hdev);
2733 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2734 if (conn && !ev->status)
2735 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2737 hci_dev_unlock(hdev);
2740 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2742 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2743 struct inquiry_entry *ie;
2745 BT_DBG("%s", hdev->name);
2747 hci_dev_lock(hdev);
2749 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2750 if (ie) {
2751 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2752 ie->timestamp = jiffies;
2755 hci_dev_unlock(hdev);
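/* Inquiry Result with RSSI comes in two on-air formats (with and without
 * the page scan mode field); distinguish them by the per-response size and
 * feed every entry into the inquiry cache and mgmt. */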
2758 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2759 struct sk_buff *skb)
2761 struct inquiry_data data;
2762 int num_rsp = *((__u8 *) skb->data);
2763 bool name_known, ssp;
2765 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2767 if (!num_rsp)
2768 return;
2770 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2771 return;
2773 hci_dev_lock(hdev);
2775 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2776 struct inquiry_info_with_rssi_and_pscan_mode *info;
2777 info = (void *) (skb->data + 1);
2779 for (; num_rsp; num_rsp--, info++) {
2780 bacpy(&data.bdaddr, &info->bdaddr);
2781 data.pscan_rep_mode = info->pscan_rep_mode;
2782 data.pscan_period_mode = info->pscan_period_mode;
2783 data.pscan_mode = info->pscan_mode;
2784 memcpy(data.dev_class, info->dev_class, 3);
2785 data.clock_offset = info->clock_offset;
2786 data.rssi = info->rssi;
2787 data.ssp_mode = 0x00;
2789 name_known = hci_inquiry_cache_update(hdev, &data,
2790 false, &ssp);
2791 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2792 info->dev_class, info->rssi,
2793 !name_known, ssp, NULL, 0);
2795 } else {
2796 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2798 for (; num_rsp; num_rsp--, info++) {
2799 bacpy(&data.bdaddr, &info->bdaddr);
2800 data.pscan_rep_mode = info->pscan_rep_mode;
2801 data.pscan_period_mode = info->pscan_period_mode;
2802 data.pscan_mode = 0x00;
2803 memcpy(data.dev_class, info->dev_class, 3);
2804 data.clock_offset = info->clock_offset;
2805 data.rssi = info->rssi;
2806 data.ssp_mode = 0x00;
2807 name_known = hci_inquiry_cache_update(hdev, &data,
2808 false, &ssp);
2809 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2810 info->dev_class, info->rssi,
2811 !name_known, ssp, NULL, 0);
2815 hci_dev_unlock(hdev);
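/* Remote Extended Features: cache the requested feature page and, for page
 * 1, mirror the remote host's SSP support into the connection and inquiry
 * cache before continuing connection setup (name request, authentication or
 * connect confirmation). */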
2818 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2819 struct sk_buff *skb)
2821 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2822 struct hci_conn *conn;
2824 BT_DBG("%s", hdev->name);
2826 hci_dev_lock(hdev);
2828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2829 if (!conn)
2830 goto unlock;
2832 if (ev->page < HCI_MAX_PAGES)
2833 memcpy(conn->features[ev->page], ev->features, 8);
2835 if (!ev->status && ev->page == 0x01) {
2836 struct inquiry_entry *ie;
2838 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2839 if (ie)
2840 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2842 if (ev->features[0] & LMP_HOST_SSP) {
2843 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2844 } else {
2845 /* It is mandatory by the Bluetooth specification that
2846 * Extended Inquiry Results are only used when Secure
2847 * Simple Pairing is enabled, but some devices violate
2848 * this.
2850 * To make these devices work, the internal SSP
2851 * enabled flag needs to be cleared if the remote host
2852 * features do not indicate SSP support */
2853 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2857 if (conn->state != BT_CONFIG)
2858 goto unlock;
2860 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2861 struct hci_cp_remote_name_req cp;
2862 memset(&cp, 0, sizeof(cp));
2863 bacpy(&cp.bdaddr, &conn->dst);
2864 cp.pscan_rep_mode = 0x02;
2865 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2866 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2867 mgmt_device_connected(hdev, &conn->dst, conn->type,
2868 conn->dst_type, 0, NULL, 0,
2869 conn->dev_class);
2871 if (!hci_outgoing_auth_needed(hdev, conn)) {
2872 conn->state = BT_CONNECTED;
2873 hci_proto_connect_cfm(conn, ev->status);
2874 hci_conn_drop(conn);
2877 unlock:
2878 hci_dev_unlock(hdev);
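/* Synchronous Connection Complete: on success register the SCO/eSCO link;
 * on a handful of negotiation errors retry an outgoing setup with a
 * fallback packet type before giving up and closing the connection. */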
2881 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2882 struct sk_buff *skb)
2884 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2885 struct hci_conn *conn;
2887 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2889 hci_dev_lock(hdev);
2891 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2892 if (!conn) {
2893 if (ev->link_type == ESCO_LINK)
2894 goto unlock;
2896 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2897 if (!conn)
2898 goto unlock;
2900 conn->type = SCO_LINK;
2903 switch (ev->status) {
2904 case 0x00:
2905 conn->handle = __le16_to_cpu(ev->handle);
2906 conn->state = BT_CONNECTED;
2908 hci_conn_add_sysfs(conn);
2909 break;
2911 case 0x0d: /* Connection Rejected due to Limited Resources */
2912 case 0x11: /* Unsupported Feature or Parameter Value */
2913 case 0x1c: /* SCO interval rejected */
2914 case 0x1a: /* Unsupported Remote Feature */
2915 case 0x1f: /* Unspecified error */
2916 if (conn->out) {
2917 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2918 (hdev->esco_type & EDR_ESCO_MASK);
2919 if (hci_setup_sync(conn, conn->link->handle))
2920 goto unlock;
2922 /* fall through */
2924 default:
2925 conn->state = BT_CLOSED;
2926 break;
2929 hci_proto_connect_cfm(conn, ev->status);
2930 if (ev->status)
2931 hci_conn_del(conn);
2933 unlock:
2934 hci_dev_unlock(hdev);
2937 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2938 struct sk_buff *skb)
2940 struct inquiry_data data;
2941 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2942 int num_rsp = *((__u8 *) skb->data);
2943 size_t eir_len;
2945 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2947 if (!num_rsp)
2948 return;
2950 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2951 return;
2953 hci_dev_lock(hdev);
2955 for (; num_rsp; num_rsp--, info++) {
2956 bool name_known, ssp;
2958 bacpy(&data.bdaddr, &info->bdaddr);
2959 data.pscan_rep_mode = info->pscan_rep_mode;
2960 data.pscan_period_mode = info->pscan_period_mode;
2961 data.pscan_mode = 0x00;
2962 memcpy(data.dev_class, info->dev_class, 3);
2963 data.clock_offset = info->clock_offset;
2964 data.rssi = info->rssi;
2965 data.ssp_mode = 0x01;
2967 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2968 name_known = eir_has_data_type(info->data,
2969 sizeof(info->data),
2970 EIR_NAME_COMPLETE);
2971 else
2972 name_known = true;
2974 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2975 &ssp);
2976 eir_len = eir_get_length(info->data, sizeof(info->data));
2977 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2978 info->dev_class, info->rssi, !name_known,
2979 ssp, info->data, eir_len);
2982 hci_dev_unlock(hdev);
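/* Encryption Key Refresh Complete: only acted upon for LE links here. A
 * failure tears the connection down; success promotes the pending security
 * level and finishes connection setup if it was still in BT_CONFIG. */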
2985 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
2986 struct sk_buff *skb)
2988 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
2989 struct hci_conn *conn;
2991 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
2992 __le16_to_cpu(ev->handle));
2994 hci_dev_lock(hdev);
2996 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2997 if (!conn)
2998 goto unlock;
3000 /* For BR/EDR the necessary steps are taken through the
3001 * auth_complete event. */
3003 if (conn->type != LE_LINK)
3004 goto unlock;
3006 if (!ev->status)
3007 conn->sec_level = conn->pending_sec_level;
3009 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3011 if (ev->status && conn->state == BT_CONNECTED) {
3012 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3013 hci_conn_drop(conn);
3014 goto unlock;
3017 if (conn->state == BT_CONFIG) {
3018 if (!ev->status)
3019 conn->state = BT_CONNECTED;
3021 hci_proto_connect_cfm(conn, ev->status);
3022 hci_conn_drop(conn);
3023 } else {
3024 hci_auth_cfm(conn, ev->status);
3026 hci_conn_hold(conn);
3027 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3028 hci_conn_drop(conn);
3031 unlock:
3032 hci_dev_unlock(hdev);
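/* Derive the authentication requirements to advertise in the IO Capability
 * Reply from what the remote side asked for and from both sides' IO
 * capabilities (MITM protection is only requested when both sides can
 * actually support it). */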
3035 static u8 hci_get_auth_req(struct hci_conn *conn)
3037 /* If remote requests dedicated bonding follow that lead */
3038 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3039 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3040 /* If both remote and local IO capabilities allow MITM
3041 * protection then require it, otherwise don't */
3042 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3043 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3044 return HCI_AT_DEDICATED_BONDING;
3045 else
3046 return HCI_AT_DEDICATED_BONDING_MITM;
3049 /* If remote requests no-bonding follow that lead */
3050 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3051 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3052 return conn->remote_auth | (conn->auth_type & 0x01);
3054 return conn->auth_type;
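/* IO Capability Request: reply with our capability, authentication
 * requirements and OOB availability when pairing is allowed, otherwise
 * reject with "pairing not allowed". */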
3057 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3059 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3060 struct hci_conn *conn;
3062 BT_DBG("%s", hdev->name);
3064 hci_dev_lock(hdev);
3066 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3067 if (!conn)
3068 goto unlock;
3070 hci_conn_hold(conn);
3072 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3073 goto unlock;
3075 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3076 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3077 struct hci_cp_io_capability_reply cp;
3079 bacpy(&cp.bdaddr, &ev->bdaddr);
3080 /* Change the IO capability from KeyboardDisplay to
3081 * DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
3082 cp.capability = (conn->io_capability == 0x04) ?
3083 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3084 conn->auth_type = hci_get_auth_req(conn);
3085 cp.authentication = conn->auth_type;
3087 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3088 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3089 cp.oob_data = 0x01;
3090 else
3091 cp.oob_data = 0x00;
3093 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3094 sizeof(cp), &cp);
3095 } else {
3096 struct hci_cp_io_capability_neg_reply cp;
3098 bacpy(&cp.bdaddr, &ev->bdaddr);
3099 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3101 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3102 sizeof(cp), &cp);
3105 unlock:
3106 hci_dev_unlock(hdev);
3109 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3111 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3112 struct hci_conn *conn;
3114 BT_DBG("%s", hdev->name);
3116 hci_dev_lock(hdev);
3118 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3119 if (!conn)
3120 goto unlock;
3122 conn->remote_cap = ev->capability;
3123 conn->remote_auth = ev->authentication;
3124 if (ev->oob_data)
3125 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3127 unlock:
3128 hci_dev_unlock(hdev);
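/* User Confirmation Request: reject if we require MITM protection that the
 * remote cannot provide, auto-accept pure Just Works cases (optionally
 * after auto_accept_delay), and hand everything else to user space via
 * mgmt. */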
3131 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3132 struct sk_buff *skb)
3134 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3135 int loc_mitm, rem_mitm, confirm_hint = 0;
3136 struct hci_conn *conn;
3138 BT_DBG("%s", hdev->name);
3140 hci_dev_lock(hdev);
3142 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3143 goto unlock;
3145 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3146 if (!conn)
3147 goto unlock;
3149 loc_mitm = (conn->auth_type & 0x01);
3150 rem_mitm = (conn->remote_auth & 0x01);
3152 /* If we require MITM but the remote device can't provide that
3153 * (it has NoInputNoOutput) then reject the confirmation
3154 * request. The only exception is when we're dedicated bonding
3155 * initiators (connect_cfm_cb set) since then we always have the MITM
3156 * bit set. */
3157 if (!conn->connect_cfm_cb && loc_mitm &&
3158 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3159 BT_DBG("Rejecting request: remote device can't provide MITM");
3160 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3161 sizeof(ev->bdaddr), &ev->bdaddr);
3162 goto unlock;
3165 /* If neither side requires MITM protection, auto-accept */
3166 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3167 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3169 /* If we're not the initiators, request authorization to
3170 * proceed from user space (mgmt_user_confirm with
3171 * confirm_hint set to 1). The exception is if neither
3172 * side had MITM, in which case we do auto-accept. */
3174 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3175 (loc_mitm || rem_mitm)) {
3176 BT_DBG("Confirming auto-accept as acceptor");
3177 confirm_hint = 1;
3178 goto confirm;
3181 BT_DBG("Auto-accept of user confirmation with %ums delay",
3182 hdev->auto_accept_delay);
3184 if (hdev->auto_accept_delay > 0) {
3185 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3186 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3187 goto unlock;
3190 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3191 sizeof(ev->bdaddr), &ev->bdaddr);
3192 goto unlock;
3195 confirm:
3196 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3197 confirm_hint);
3199 unlock:
3200 hci_dev_unlock(hdev);
3203 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3204 struct sk_buff *skb)
3206 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3208 BT_DBG("%s", hdev->name);
3210 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3211 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3214 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3215 struct sk_buff *skb)
3217 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3218 struct hci_conn *conn;
3220 BT_DBG("%s", hdev->name);
3222 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3223 if (!conn)
3224 return;
3226 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3227 conn->passkey_entered = 0;
3229 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3230 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3231 conn->dst_type, conn->passkey_notify,
3232 conn->passkey_entered);
3235 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3237 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3238 struct hci_conn *conn;
3240 BT_DBG("%s", hdev->name);
3242 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3243 if (!conn)
3244 return;
3246 switch (ev->type) {
3247 case HCI_KEYPRESS_STARTED:
3248 conn->passkey_entered = 0;
3249 return;
3251 case HCI_KEYPRESS_ENTERED:
3252 conn->passkey_entered++;
3253 break;
3255 case HCI_KEYPRESS_ERASED:
3256 conn->passkey_entered--;
3257 break;
3259 case HCI_KEYPRESS_CLEARED:
3260 conn->passkey_entered = 0;
3261 break;
3263 case HCI_KEYPRESS_COMPLETED:
3264 return;
3267 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3268 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3269 conn->dst_type, conn->passkey_notify,
3270 conn->passkey_entered);
3273 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3274 struct sk_buff *skb)
3276 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3277 struct hci_conn *conn;
3279 BT_DBG("%s", hdev->name);
3281 hci_dev_lock(hdev);
3283 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3284 if (!conn)
3285 goto unlock;
3287 /* To avoid duplicate auth_failed events to user space we check
3288 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3289 * initiated the authentication. A traditional auth_complete
3290 * event is always produced as initiator and is also mapped to
3291 * the mgmt_auth_failed event. */
3292 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3293 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3294 ev->status);
3296 hci_conn_drop(conn);
3298 unlock:
3299 hci_dev_unlock(hdev);
3302 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3303 struct sk_buff *skb)
3305 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3306 struct inquiry_entry *ie;
3307 struct hci_conn *conn;
3309 BT_DBG("%s", hdev->name);
3311 hci_dev_lock(hdev);
3313 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3314 if (conn)
3315 memcpy(conn->features[1], ev->features, 8);
3317 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3318 if (ie)
3319 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3321 hci_dev_unlock(hdev);
3324 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3325 struct sk_buff *skb)
3327 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3328 struct oob_data *data;
3330 BT_DBG("%s", hdev->name);
3332 hci_dev_lock(hdev);
3334 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3335 goto unlock;
3337 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3338 if (data) {
3339 struct hci_cp_remote_oob_data_reply cp;
3341 bacpy(&cp.bdaddr, &ev->bdaddr);
3342 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3343 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3345 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3346 &cp);
3347 } else {
3348 struct hci_cp_remote_oob_data_neg_reply cp;
3350 bacpy(&cp.bdaddr, &ev->bdaddr);
3351 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3352 &cp);
3355 unlock:
3356 hci_dev_unlock(hdev);
3359 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3360 struct sk_buff *skb)
3362 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3363 struct hci_conn *hcon, *bredr_hcon;
3365 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3366 ev->status);
3368 hci_dev_lock(hdev);
3370 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3371 if (!hcon) {
3372 hci_dev_unlock(hdev);
3373 return;
3376 if (ev->status) {
3377 hci_conn_del(hcon);
3378 hci_dev_unlock(hdev);
3379 return;
3382 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3384 hcon->state = BT_CONNECTED;
3385 bacpy(&hcon->dst, &bredr_hcon->dst);
3387 hci_conn_hold(hcon);
3388 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3389 hci_conn_drop(hcon);
3391 hci_conn_add_sysfs(hcon);
3393 amp_physical_cfm(bredr_hcon, hcon);
3395 hci_dev_unlock(hdev);
3398 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3400 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3401 struct hci_conn *hcon;
3402 struct hci_chan *hchan;
3403 struct amp_mgr *mgr;
3405 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3406 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3407 ev->status);
3409 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3410 if (!hcon)
3411 return;
3413 /* Create AMP hchan */
3414 hchan = hci_chan_create(hcon);
3415 if (!hchan)
3416 return;
3418 hchan->handle = le16_to_cpu(ev->handle);
3420 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3422 mgr = hcon->amp_mgr;
3423 if (mgr && mgr->bredr_chan) {
3424 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3426 l2cap_chan_lock(bredr_chan);
3428 bredr_chan->conn->mtu = hdev->block_mtu;
3429 l2cap_logical_cfm(bredr_chan, hchan, 0);
3430 hci_conn_hold(hcon);
3432 l2cap_chan_unlock(bredr_chan);
3436 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3437 struct sk_buff *skb)
3439 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3440 struct hci_chan *hchan;
3442 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3443 le16_to_cpu(ev->handle), ev->status);
3445 if (ev->status)
3446 return;
3448 hci_dev_lock(hdev);
3450 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3451 if (!hchan)
3452 goto unlock;
3454 amp_destroy_logical_link(hchan, ev->reason);
3456 unlock:
3457 hci_dev_unlock(hdev);
3460 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3461 struct sk_buff *skb)
3463 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3464 struct hci_conn *hcon;
3466 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3468 if (ev->status)
3469 return;
3471 hci_dev_lock(hdev);
3473 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3474 if (hcon) {
3475 hcon->state = BT_CLOSED;
3476 hci_conn_del(hcon);
3479 hci_dev_unlock(hdev);
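/* LE Connection Complete: attach the event to the pending LE connection
 * (or create one for incoming links), record the role and address type,
 * and either report the failure or mark the connection up and notify mgmt
 * and the protocol layers. */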
3482 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3484 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3485 struct hci_conn *conn;
3487 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3489 hci_dev_lock(hdev);
3491 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3492 if (!conn) {
3493 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3494 if (!conn) {
3495 BT_ERR("No memory for new connection");
3496 goto unlock;
3499 conn->dst_type = ev->bdaddr_type;
3501 if (ev->role == LE_CONN_ROLE_MASTER) {
3502 conn->out = true;
3503 conn->link_mode |= HCI_LM_MASTER;
3507 if (ev->status) {
3508 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3509 conn->dst_type, ev->status);
3510 hci_proto_connect_cfm(conn, ev->status);
3511 conn->state = BT_CLOSED;
3512 hci_conn_del(conn);
3513 goto unlock;
3516 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3517 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3518 conn->dst_type, 0, NULL, 0, NULL);
3520 conn->sec_level = BT_SECURITY_LOW;
3521 conn->handle = __le16_to_cpu(ev->handle);
3522 conn->state = BT_CONNECTED;
3524 hci_conn_add_sysfs(conn);
3526 hci_proto_connect_cfm(conn, ev->status);
3528 unlock:
3529 hci_dev_unlock(hdev);
3532 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3534 u8 num_reports = skb->data[0];
3535 void *ptr = &skb->data[1];
3536 s8 rssi;
3538 while (num_reports--) {
3539 struct hci_ev_le_advertising_info *ev = ptr;
3541 rssi = ev->data[ev->length];
3542 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3543 NULL, rssi, 0, 1, ev->data, ev->length);
3545 ptr += sizeof(*ev) + ev->length + 1;
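/* LE Long Term Key Request: look up an LTK by EDIV/Rand and hand it to the
 * controller, setting the pending security level according to whether the
 * key is authenticated; reply negatively when no key is known. */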
3549 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3551 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3552 struct hci_cp_le_ltk_reply cp;
3553 struct hci_cp_le_ltk_neg_reply neg;
3554 struct hci_conn *conn;
3555 struct smp_ltk *ltk;
3557 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3559 hci_dev_lock(hdev);
3561 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3562 if (conn == NULL)
3563 goto not_found;
3565 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3566 if (ltk == NULL)
3567 goto not_found;
3569 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3570 cp.handle = cpu_to_le16(conn->handle);
3572 if (ltk->authenticated)
3573 conn->pending_sec_level = BT_SECURITY_HIGH;
3574 else
3575 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3577 conn->enc_key_size = ltk->enc_size;
3579 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3581 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
3582 * temporary key used to encrypt a connection following
3583 * pairing. It is used during the Encrypted Session Setup to
3584 * distribute the keys. Later, security can be re-established
3585 * using a distributed LTK. */
3587 if (ltk->type == HCI_SMP_STK_SLAVE) {
3588 list_del(&ltk->list);
3589 kfree(ltk);
3592 hci_dev_unlock(hdev);
3594 return;
3596 not_found:
3597 neg.handle = ev->handle;
3598 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3599 hci_dev_unlock(hdev);
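/* LE events are multiplexed under a single meta event; strip the meta
 * header and dispatch on the subevent code. */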
3602 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3604 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3606 skb_pull(skb, sizeof(*le_ev));
3608 switch (le_ev->subevent) {
3609 case HCI_EV_LE_CONN_COMPLETE:
3610 hci_le_conn_complete_evt(hdev, skb);
3611 break;
3613 case HCI_EV_LE_ADVERTISING_REPORT:
3614 hci_le_adv_report_evt(hdev, skb);
3615 break;
3617 case HCI_EV_LE_LTK_REQ:
3618 hci_le_ltk_request_evt(hdev, skb);
3619 break;
3621 default:
3622 break;
3626 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3628 struct hci_ev_channel_selected *ev = (void *) skb->data;
3629 struct hci_conn *hcon;
3631 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3633 skb_pull(skb, sizeof(*ev));
3635 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3636 if (!hcon)
3637 return;
3639 amp_read_loc_assoc_final_data(hdev, hcon);
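/* Top-level HCI event dispatcher: optionally stash the event for a pending
 * synchronous request, complete a sent command that was waiting for this
 * specific event, then fan out to the per-event handlers above. */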
3642 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3644 struct hci_event_hdr *hdr = (void *) skb->data;
3645 __u8 event = hdr->evt;
3647 hci_dev_lock(hdev);
3649 /* Received events are (currently) only needed when a request is
3650 * ongoing, so avoid unnecessary memory allocation. */
3652 if (hdev->req_status == HCI_REQ_PEND) {
3653 kfree_skb(hdev->recv_evt);
3654 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3657 hci_dev_unlock(hdev);
3659 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3661 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3662 struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
3663 u16 opcode = __le16_to_cpu(hdr->opcode);
3665 hci_req_cmd_complete(hdev, opcode, 0);
3668 switch (event) {
3669 case HCI_EV_INQUIRY_COMPLETE:
3670 hci_inquiry_complete_evt(hdev, skb);
3671 break;
3673 case HCI_EV_INQUIRY_RESULT:
3674 hci_inquiry_result_evt(hdev, skb);
3675 break;
3677 case HCI_EV_CONN_COMPLETE:
3678 hci_conn_complete_evt(hdev, skb);
3679 break;
3681 case HCI_EV_CONN_REQUEST:
3682 hci_conn_request_evt(hdev, skb);
3683 break;
3685 case HCI_EV_DISCONN_COMPLETE:
3686 hci_disconn_complete_evt(hdev, skb);
3687 break;
3689 case HCI_EV_AUTH_COMPLETE:
3690 hci_auth_complete_evt(hdev, skb);
3691 break;
3693 case HCI_EV_REMOTE_NAME:
3694 hci_remote_name_evt(hdev, skb);
3695 break;
3697 case HCI_EV_ENCRYPT_CHANGE:
3698 hci_encrypt_change_evt(hdev, skb);
3699 break;
3701 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3702 hci_change_link_key_complete_evt(hdev, skb);
3703 break;
3705 case HCI_EV_REMOTE_FEATURES:
3706 hci_remote_features_evt(hdev, skb);
3707 break;
3709 case HCI_EV_CMD_COMPLETE:
3710 hci_cmd_complete_evt(hdev, skb);
3711 break;
3713 case HCI_EV_CMD_STATUS:
3714 hci_cmd_status_evt(hdev, skb);
3715 break;
3717 case HCI_EV_ROLE_CHANGE:
3718 hci_role_change_evt(hdev, skb);
3719 break;
3721 case HCI_EV_NUM_COMP_PKTS:
3722 hci_num_comp_pkts_evt(hdev, skb);
3723 break;
3725 case HCI_EV_MODE_CHANGE:
3726 hci_mode_change_evt(hdev, skb);
3727 break;
3729 case HCI_EV_PIN_CODE_REQ:
3730 hci_pin_code_request_evt(hdev, skb);
3731 break;
3733 case HCI_EV_LINK_KEY_REQ:
3734 hci_link_key_request_evt(hdev, skb);
3735 break;
3737 case HCI_EV_LINK_KEY_NOTIFY:
3738 hci_link_key_notify_evt(hdev, skb);
3739 break;
3741 case HCI_EV_CLOCK_OFFSET:
3742 hci_clock_offset_evt(hdev, skb);
3743 break;
3745 case HCI_EV_PKT_TYPE_CHANGE:
3746 hci_pkt_type_change_evt(hdev, skb);
3747 break;
3749 case HCI_EV_PSCAN_REP_MODE:
3750 hci_pscan_rep_mode_evt(hdev, skb);
3751 break;
3753 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3754 hci_inquiry_result_with_rssi_evt(hdev, skb);
3755 break;
3757 case HCI_EV_REMOTE_EXT_FEATURES:
3758 hci_remote_ext_features_evt(hdev, skb);
3759 break;
3761 case HCI_EV_SYNC_CONN_COMPLETE:
3762 hci_sync_conn_complete_evt(hdev, skb);
3763 break;
3765 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3766 hci_extended_inquiry_result_evt(hdev, skb);
3767 break;
3769 case HCI_EV_KEY_REFRESH_COMPLETE:
3770 hci_key_refresh_complete_evt(hdev, skb);
3771 break;
3773 case HCI_EV_IO_CAPA_REQUEST:
3774 hci_io_capa_request_evt(hdev, skb);
3775 break;
3777 case HCI_EV_IO_CAPA_REPLY:
3778 hci_io_capa_reply_evt(hdev, skb);
3779 break;
3781 case HCI_EV_USER_CONFIRM_REQUEST:
3782 hci_user_confirm_request_evt(hdev, skb);
3783 break;
3785 case HCI_EV_USER_PASSKEY_REQUEST:
3786 hci_user_passkey_request_evt(hdev, skb);
3787 break;
3789 case HCI_EV_USER_PASSKEY_NOTIFY:
3790 hci_user_passkey_notify_evt(hdev, skb);
3791 break;
3793 case HCI_EV_KEYPRESS_NOTIFY:
3794 hci_keypress_notify_evt(hdev, skb);
3795 break;
3797 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3798 hci_simple_pair_complete_evt(hdev, skb);
3799 break;
3801 case HCI_EV_REMOTE_HOST_FEATURES:
3802 hci_remote_host_features_evt(hdev, skb);
3803 break;
3805 case HCI_EV_LE_META:
3806 hci_le_meta_evt(hdev, skb);
3807 break;
3809 case HCI_EV_CHANNEL_SELECTED:
3810 hci_chan_selected_evt(hdev, skb);
3811 break;
3813 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3814 hci_remote_oob_data_request_evt(hdev, skb);
3815 break;
3817 case HCI_EV_PHY_LINK_COMPLETE:
3818 hci_phy_link_complete_evt(hdev, skb);
3819 break;
3821 case HCI_EV_LOGICAL_LINK_COMPLETE:
3822 hci_loglink_complete_evt(hdev, skb);
3823 break;
3825 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3826 hci_disconn_loglink_complete_evt(hdev, skb);
3827 break;
3829 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3830 hci_disconn_phylink_complete_evt(hdev, skb);
3831 break;
3833 case HCI_EV_NUM_COMP_BLOCKS:
3834 hci_num_comp_blocks_evt(hdev, skb);
3835 break;
3837 default:
3838 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3839 break;
3842 kfree_skb(skb);
3843 hdev->stat.evt_rx++;