nfsd4: typo logical vs bitwise negate for want_mask
[linux-btrfs-devel.git] / net / bluetooth / hci_event.c
bloba40170e022e84e5e88f4ccbe4eb8724d5411c49d
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
48 static int enable_le;
50 /* Handle HCI Event packets */
52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
54 __u8 status = *((__u8 *) skb->data);
56 BT_DBG("%s status 0x%x", hdev->name, status);
58 if (status)
59 return;
61 if (test_bit(HCI_MGMT, &hdev->flags) &&
62 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
63 mgmt_discovering(hdev->id, 0);
65 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
67 hci_conn_check_pending(hdev);
70 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 __u8 status = *((__u8 *) skb->data);
74 BT_DBG("%s status 0x%x", hdev->name, status);
76 if (status)
77 return;
79 if (test_bit(HCI_MGMT, &hdev->flags) &&
80 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
81 mgmt_discovering(hdev->id, 0);
83 hci_conn_check_pending(hdev);
86 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
88 BT_DBG("%s", hdev->name);
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
96 BT_DBG("%s status 0x%x", hdev->name, rp->status);
98 if (rp->status)
99 return;
101 hci_dev_lock(hdev);
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 conn->link_mode &= ~HCI_LM_MASTER;
107 else
108 conn->link_mode |= HCI_LM_MASTER;
111 hci_dev_unlock(hdev);
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
119 BT_DBG("%s status 0x%x", hdev->name, rp->status);
121 if (rp->status)
122 return;
124 hci_dev_lock(hdev);
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
130 hci_dev_unlock(hdev);
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
137 void *sent;
139 BT_DBG("%s status 0x%x", hdev->name, rp->status);
141 if (rp->status)
142 return;
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 if (!sent)
146 return;
148 hci_dev_lock(hdev);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 if (conn)
152 conn->link_policy = get_unaligned_le16(sent + 2);
154 hci_dev_unlock(hdev);
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161 BT_DBG("%s status 0x%x", hdev->name, rp->status);
163 if (rp->status)
164 return;
166 hdev->link_policy = __le16_to_cpu(rp->policy);
169 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
171 __u8 status = *((__u8 *) skb->data);
172 void *sent;
174 BT_DBG("%s status 0x%x", hdev->name, status);
176 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
177 if (!sent)
178 return;
180 if (!status)
181 hdev->link_policy = get_unaligned_le16(sent);
183 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
188 __u8 status = *((__u8 *) skb->data);
190 BT_DBG("%s status 0x%x", hdev->name, status);
192 clear_bit(HCI_RESET, &hdev->flags);
194 hci_req_complete(hdev, HCI_OP_RESET, status);
197 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
199 __u8 status = *((__u8 *) skb->data);
200 void *sent;
202 BT_DBG("%s status 0x%x", hdev->name, status);
204 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
205 if (!sent)
206 return;
208 if (test_bit(HCI_MGMT, &hdev->flags))
209 mgmt_set_local_name_complete(hdev->id, sent, status);
211 if (status)
212 return;
214 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
217 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
219 struct hci_rp_read_local_name *rp = (void *) skb->data;
221 BT_DBG("%s status 0x%x", hdev->name, rp->status);
223 if (rp->status)
224 return;
226 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
229 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
231 __u8 status = *((__u8 *) skb->data);
232 void *sent;
234 BT_DBG("%s status 0x%x", hdev->name, status);
236 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
237 if (!sent)
238 return;
240 if (!status) {
241 __u8 param = *((__u8 *) sent);
243 if (param == AUTH_ENABLED)
244 set_bit(HCI_AUTH, &hdev->flags);
245 else
246 clear_bit(HCI_AUTH, &hdev->flags);
249 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
252 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
254 __u8 status = *((__u8 *) skb->data);
255 void *sent;
257 BT_DBG("%s status 0x%x", hdev->name, status);
259 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
260 if (!sent)
261 return;
263 if (!status) {
264 __u8 param = *((__u8 *) sent);
266 if (param)
267 set_bit(HCI_ENCRYPT, &hdev->flags);
268 else
269 clear_bit(HCI_ENCRYPT, &hdev->flags);
272 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
275 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
277 __u8 status = *((__u8 *) skb->data);
278 void *sent;
280 BT_DBG("%s status 0x%x", hdev->name, status);
282 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
283 if (!sent)
284 return;
286 if (!status) {
287 __u8 param = *((__u8 *) sent);
288 int old_pscan, old_iscan;
290 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
291 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
293 if (param & SCAN_INQUIRY) {
294 set_bit(HCI_ISCAN, &hdev->flags);
295 if (!old_iscan)
296 mgmt_discoverable(hdev->id, 1);
297 } else if (old_iscan)
298 mgmt_discoverable(hdev->id, 0);
300 if (param & SCAN_PAGE) {
301 set_bit(HCI_PSCAN, &hdev->flags);
302 if (!old_pscan)
303 mgmt_connectable(hdev->id, 1);
304 } else if (old_pscan)
305 mgmt_connectable(hdev->id, 0);
308 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
311 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
313 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
315 BT_DBG("%s status 0x%x", hdev->name, rp->status);
317 if (rp->status)
318 return;
320 memcpy(hdev->dev_class, rp->dev_class, 3);
322 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
323 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
326 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
328 __u8 status = *((__u8 *) skb->data);
329 void *sent;
331 BT_DBG("%s status 0x%x", hdev->name, status);
333 if (status)
334 return;
336 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
337 if (!sent)
338 return;
340 memcpy(hdev->dev_class, sent, 3);
343 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
345 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
346 __u16 setting;
348 BT_DBG("%s status 0x%x", hdev->name, rp->status);
350 if (rp->status)
351 return;
353 setting = __le16_to_cpu(rp->voice_setting);
355 if (hdev->voice_setting == setting)
356 return;
358 hdev->voice_setting = setting;
360 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
362 if (hdev->notify) {
363 tasklet_disable(&hdev->tx_task);
364 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
365 tasklet_enable(&hdev->tx_task);
369 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
371 __u8 status = *((__u8 *) skb->data);
372 __u16 setting;
373 void *sent;
375 BT_DBG("%s status 0x%x", hdev->name, status);
377 if (status)
378 return;
380 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
381 if (!sent)
382 return;
384 setting = get_unaligned_le16(sent);
386 if (hdev->voice_setting == setting)
387 return;
389 hdev->voice_setting = setting;
391 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
393 if (hdev->notify) {
394 tasklet_disable(&hdev->tx_task);
395 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
396 tasklet_enable(&hdev->tx_task);
400 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
402 __u8 status = *((__u8 *) skb->data);
404 BT_DBG("%s status 0x%x", hdev->name, status);
406 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
409 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
411 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
413 BT_DBG("%s status 0x%x", hdev->name, rp->status);
415 if (rp->status)
416 return;
418 hdev->ssp_mode = rp->mode;
421 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
423 __u8 status = *((__u8 *) skb->data);
424 void *sent;
426 BT_DBG("%s status 0x%x", hdev->name, status);
428 if (status)
429 return;
431 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
432 if (!sent)
433 return;
435 hdev->ssp_mode = *((__u8 *) sent);
438 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
440 if (hdev->features[6] & LMP_EXT_INQ)
441 return 2;
443 if (hdev->features[3] & LMP_RSSI_INQ)
444 return 1;
446 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
447 hdev->lmp_subver == 0x0757)
448 return 1;
450 if (hdev->manufacturer == 15) {
451 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
452 return 1;
453 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
454 return 1;
455 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
456 return 1;
459 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
460 hdev->lmp_subver == 0x1805)
461 return 1;
463 return 0;
466 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
468 u8 mode;
470 mode = hci_get_inquiry_mode(hdev);
472 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
475 static void hci_setup_event_mask(struct hci_dev *hdev)
477 /* The second byte is 0xff instead of 0x9f (two reserved bits
478 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
479 * command otherwise */
480 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
482 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
483 * any event mask for pre 1.2 devices */
484 if (hdev->lmp_ver <= 1)
485 return;
487 events[4] |= 0x01; /* Flow Specification Complete */
488 events[4] |= 0x02; /* Inquiry Result with RSSI */
489 events[4] |= 0x04; /* Read Remote Extended Features Complete */
490 events[5] |= 0x08; /* Synchronous Connection Complete */
491 events[5] |= 0x10; /* Synchronous Connection Changed */
493 if (hdev->features[3] & LMP_RSSI_INQ)
494 events[4] |= 0x04; /* Inquiry Result with RSSI */
496 if (hdev->features[5] & LMP_SNIFF_SUBR)
497 events[5] |= 0x20; /* Sniff Subrating */
499 if (hdev->features[5] & LMP_PAUSE_ENC)
500 events[5] |= 0x80; /* Encryption Key Refresh Complete */
502 if (hdev->features[6] & LMP_EXT_INQ)
503 events[5] |= 0x40; /* Extended Inquiry Result */
505 if (hdev->features[6] & LMP_NO_FLUSH)
506 events[7] |= 0x01; /* Enhanced Flush Complete */
508 if (hdev->features[7] & LMP_LSTO)
509 events[6] |= 0x80; /* Link Supervision Timeout Changed */
511 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
512 events[6] |= 0x01; /* IO Capability Request */
513 events[6] |= 0x02; /* IO Capability Response */
514 events[6] |= 0x04; /* User Confirmation Request */
515 events[6] |= 0x08; /* User Passkey Request */
516 events[6] |= 0x10; /* Remote OOB Data Request */
517 events[6] |= 0x20; /* Simple Pairing Complete */
518 events[7] |= 0x04; /* User Passkey Notification */
519 events[7] |= 0x08; /* Keypress Notification */
520 events[7] |= 0x10; /* Remote Host Supported
521 * Features Notification */
524 if (hdev->features[4] & LMP_LE)
525 events[7] |= 0x20; /* LE Meta-Event */
527 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
530 static void hci_set_le_support(struct hci_dev *hdev)
532 struct hci_cp_write_le_host_supported cp;
534 memset(&cp, 0, sizeof(cp));
536 if (enable_le) {
537 cp.le = 1;
538 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
541 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
544 static void hci_setup(struct hci_dev *hdev)
546 hci_setup_event_mask(hdev);
548 if (hdev->lmp_ver > 1)
549 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
551 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
552 u8 mode = 0x01;
553 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
556 if (hdev->features[3] & LMP_RSSI_INQ)
557 hci_setup_inquiry_mode(hdev);
559 if (hdev->features[7] & LMP_INQ_TX_PWR)
560 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
562 if (hdev->features[7] & LMP_EXTFEATURES) {
563 struct hci_cp_read_local_ext_features cp;
565 cp.page = 0x01;
566 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
567 sizeof(cp), &cp);
570 if (hdev->features[4] & LMP_LE)
571 hci_set_le_support(hdev);
574 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
576 struct hci_rp_read_local_version *rp = (void *) skb->data;
578 BT_DBG("%s status 0x%x", hdev->name, rp->status);
580 if (rp->status)
581 return;
583 hdev->hci_ver = rp->hci_ver;
584 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
585 hdev->lmp_ver = rp->lmp_ver;
586 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
587 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
589 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
590 hdev->manufacturer,
591 hdev->hci_ver, hdev->hci_rev);
593 if (test_bit(HCI_INIT, &hdev->flags))
594 hci_setup(hdev);
597 static void hci_setup_link_policy(struct hci_dev *hdev)
599 u16 link_policy = 0;
601 if (hdev->features[0] & LMP_RSWITCH)
602 link_policy |= HCI_LP_RSWITCH;
603 if (hdev->features[0] & LMP_HOLD)
604 link_policy |= HCI_LP_HOLD;
605 if (hdev->features[0] & LMP_SNIFF)
606 link_policy |= HCI_LP_SNIFF;
607 if (hdev->features[1] & LMP_PARK)
608 link_policy |= HCI_LP_PARK;
610 link_policy = cpu_to_le16(link_policy);
611 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
612 sizeof(link_policy), &link_policy);
615 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
617 struct hci_rp_read_local_commands *rp = (void *) skb->data;
619 BT_DBG("%s status 0x%x", hdev->name, rp->status);
621 if (rp->status)
622 goto done;
624 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
626 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
627 hci_setup_link_policy(hdev);
629 done:
630 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
633 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
635 struct hci_rp_read_local_features *rp = (void *) skb->data;
637 BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 if (rp->status)
640 return;
642 memcpy(hdev->features, rp->features, 8);
644 /* Adjust default settings according to features
645 * supported by device. */
647 if (hdev->features[0] & LMP_3SLOT)
648 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
650 if (hdev->features[0] & LMP_5SLOT)
651 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
653 if (hdev->features[1] & LMP_HV2) {
654 hdev->pkt_type |= (HCI_HV2);
655 hdev->esco_type |= (ESCO_HV2);
658 if (hdev->features[1] & LMP_HV3) {
659 hdev->pkt_type |= (HCI_HV3);
660 hdev->esco_type |= (ESCO_HV3);
663 if (hdev->features[3] & LMP_ESCO)
664 hdev->esco_type |= (ESCO_EV3);
666 if (hdev->features[4] & LMP_EV4)
667 hdev->esco_type |= (ESCO_EV4);
669 if (hdev->features[4] & LMP_EV5)
670 hdev->esco_type |= (ESCO_EV5);
672 if (hdev->features[5] & LMP_EDR_ESCO_2M)
673 hdev->esco_type |= (ESCO_2EV3);
675 if (hdev->features[5] & LMP_EDR_ESCO_3M)
676 hdev->esco_type |= (ESCO_3EV3);
678 if (hdev->features[5] & LMP_EDR_3S_ESCO)
679 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
681 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
682 hdev->features[0], hdev->features[1],
683 hdev->features[2], hdev->features[3],
684 hdev->features[4], hdev->features[5],
685 hdev->features[6], hdev->features[7]);
688 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
689 struct sk_buff *skb)
691 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
693 BT_DBG("%s status 0x%x", hdev->name, rp->status);
695 if (rp->status)
696 return;
698 memcpy(hdev->extfeatures, rp->features, 8);
700 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
703 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
705 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
707 BT_DBG("%s status 0x%x", hdev->name, rp->status);
709 if (rp->status)
710 return;
712 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
713 hdev->sco_mtu = rp->sco_mtu;
714 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
715 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
717 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
718 hdev->sco_mtu = 64;
719 hdev->sco_pkts = 8;
722 hdev->acl_cnt = hdev->acl_pkts;
723 hdev->sco_cnt = hdev->sco_pkts;
725 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
726 hdev->acl_mtu, hdev->acl_pkts,
727 hdev->sco_mtu, hdev->sco_pkts);
730 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
732 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
734 BT_DBG("%s status 0x%x", hdev->name, rp->status);
736 if (!rp->status)
737 bacpy(&hdev->bdaddr, &rp->bdaddr);
739 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
742 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
744 __u8 status = *((__u8 *) skb->data);
746 BT_DBG("%s status 0x%x", hdev->name, status);
748 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
751 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
752 struct sk_buff *skb)
754 __u8 status = *((__u8 *) skb->data);
756 BT_DBG("%s status 0x%x", hdev->name, status);
758 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
761 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
763 __u8 status = *((__u8 *) skb->data);
765 BT_DBG("%s status 0x%x", hdev->name, status);
767 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
770 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
771 struct sk_buff *skb)
773 __u8 status = *((__u8 *) skb->data);
775 BT_DBG("%s status 0x%x", hdev->name, status);
777 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
780 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
781 struct sk_buff *skb)
783 __u8 status = *((__u8 *) skb->data);
785 BT_DBG("%s status 0x%x", hdev->name, status);
787 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
790 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
792 __u8 status = *((__u8 *) skb->data);
794 BT_DBG("%s status 0x%x", hdev->name, status);
796 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
799 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
801 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
802 struct hci_cp_pin_code_reply *cp;
803 struct hci_conn *conn;
805 BT_DBG("%s status 0x%x", hdev->name, rp->status);
807 if (test_bit(HCI_MGMT, &hdev->flags))
808 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
810 if (rp->status != 0)
811 return;
813 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
814 if (!cp)
815 return;
817 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
818 if (conn)
819 conn->pin_length = cp->pin_len;
822 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
824 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
826 BT_DBG("%s status 0x%x", hdev->name, rp->status);
828 if (test_bit(HCI_MGMT, &hdev->flags))
829 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
830 rp->status);
832 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
833 struct sk_buff *skb)
835 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
837 BT_DBG("%s status 0x%x", hdev->name, rp->status);
839 if (rp->status)
840 return;
842 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
843 hdev->le_pkts = rp->le_max_pkt;
845 hdev->le_cnt = hdev->le_pkts;
847 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
849 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
852 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
854 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
856 BT_DBG("%s status 0x%x", hdev->name, rp->status);
858 if (test_bit(HCI_MGMT, &hdev->flags))
859 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
860 rp->status);
863 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
864 struct sk_buff *skb)
866 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
868 BT_DBG("%s status 0x%x", hdev->name, rp->status);
870 if (test_bit(HCI_MGMT, &hdev->flags))
871 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
872 rp->status);
875 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
876 struct sk_buff *skb)
878 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
880 BT_DBG("%s status 0x%x", hdev->name, rp->status);
882 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
883 rp->randomizer, rp->status);
886 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
887 struct sk_buff *skb)
889 struct hci_cp_le_set_scan_enable *cp;
890 __u8 status = *((__u8 *) skb->data);
892 BT_DBG("%s status 0x%x", hdev->name, status);
894 if (status)
895 return;
897 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
898 if (!cp)
899 return;
901 hci_dev_lock(hdev);
903 if (cp->enable == 0x01) {
904 del_timer(&hdev->adv_timer);
905 hci_adv_entries_clear(hdev);
906 } else if (cp->enable == 0x00) {
907 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
910 hci_dev_unlock(hdev);
913 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
915 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
917 BT_DBG("%s status 0x%x", hdev->name, rp->status);
919 if (rp->status)
920 return;
922 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
925 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
927 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
929 BT_DBG("%s status 0x%x", hdev->name, rp->status);
931 if (rp->status)
932 return;
934 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
937 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
938 struct sk_buff *skb)
940 struct hci_cp_read_local_ext_features cp;
941 __u8 status = *((__u8 *) skb->data);
943 BT_DBG("%s status 0x%x", hdev->name, status);
945 if (status)
946 return;
948 cp.page = 0x01;
949 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
952 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
954 BT_DBG("%s status 0x%x", hdev->name, status);
956 if (status) {
957 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
958 hci_conn_check_pending(hdev);
959 return;
962 if (test_bit(HCI_MGMT, &hdev->flags) &&
963 !test_and_set_bit(HCI_INQUIRY,
964 &hdev->flags))
965 mgmt_discovering(hdev->id, 1);
968 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
970 struct hci_cp_create_conn *cp;
971 struct hci_conn *conn;
973 BT_DBG("%s status 0x%x", hdev->name, status);
975 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
976 if (!cp)
977 return;
979 hci_dev_lock(hdev);
981 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
983 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
985 if (status) {
986 if (conn && conn->state == BT_CONNECT) {
987 if (status != 0x0c || conn->attempt > 2) {
988 conn->state = BT_CLOSED;
989 hci_proto_connect_cfm(conn, status);
990 hci_conn_del(conn);
991 } else
992 conn->state = BT_CONNECT2;
994 } else {
995 if (!conn) {
996 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
997 if (conn) {
998 conn->out = 1;
999 conn->link_mode |= HCI_LM_MASTER;
1000 } else
1001 BT_ERR("No memory for new connection");
1005 hci_dev_unlock(hdev);
1008 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1010 struct hci_cp_add_sco *cp;
1011 struct hci_conn *acl, *sco;
1012 __u16 handle;
1014 BT_DBG("%s status 0x%x", hdev->name, status);
1016 if (!status)
1017 return;
1019 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1020 if (!cp)
1021 return;
1023 handle = __le16_to_cpu(cp->handle);
1025 BT_DBG("%s handle %d", hdev->name, handle);
1027 hci_dev_lock(hdev);
1029 acl = hci_conn_hash_lookup_handle(hdev, handle);
1030 if (acl) {
1031 sco = acl->link;
1032 if (sco) {
1033 sco->state = BT_CLOSED;
1035 hci_proto_connect_cfm(sco, status);
1036 hci_conn_del(sco);
1040 hci_dev_unlock(hdev);
1043 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1045 struct hci_cp_auth_requested *cp;
1046 struct hci_conn *conn;
1048 BT_DBG("%s status 0x%x", hdev->name, status);
1050 if (!status)
1051 return;
1053 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1054 if (!cp)
1055 return;
1057 hci_dev_lock(hdev);
1059 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1060 if (conn) {
1061 if (conn->state == BT_CONFIG) {
1062 hci_proto_connect_cfm(conn, status);
1063 hci_conn_put(conn);
1067 hci_dev_unlock(hdev);
1070 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1072 struct hci_cp_set_conn_encrypt *cp;
1073 struct hci_conn *conn;
1075 BT_DBG("%s status 0x%x", hdev->name, status);
1077 if (!status)
1078 return;
1080 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1081 if (!cp)
1082 return;
1084 hci_dev_lock(hdev);
1086 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1087 if (conn) {
1088 if (conn->state == BT_CONFIG) {
1089 hci_proto_connect_cfm(conn, status);
1090 hci_conn_put(conn);
1094 hci_dev_unlock(hdev);
1097 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1098 struct hci_conn *conn)
1100 if (conn->state != BT_CONFIG || !conn->out)
1101 return 0;
1103 if (conn->pending_sec_level == BT_SECURITY_SDP)
1104 return 0;
1106 /* Only request authentication for SSP connections or non-SSP
1107 * devices with sec_level HIGH */
1108 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1109 conn->pending_sec_level != BT_SECURITY_HIGH)
1110 return 0;
1112 return 1;
1115 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1117 struct hci_cp_remote_name_req *cp;
1118 struct hci_conn *conn;
1120 BT_DBG("%s status 0x%x", hdev->name, status);
1122 /* If successful wait for the name req complete event before
1123 * checking for the need to do authentication */
1124 if (!status)
1125 return;
1127 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1128 if (!cp)
1129 return;
1131 hci_dev_lock(hdev);
1133 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1134 if (!conn)
1135 goto unlock;
1137 if (!hci_outgoing_auth_needed(hdev, conn))
1138 goto unlock;
1140 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1141 struct hci_cp_auth_requested cp;
1142 cp.handle = __cpu_to_le16(conn->handle);
1143 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1146 unlock:
1147 hci_dev_unlock(hdev);
1150 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1152 struct hci_cp_read_remote_features *cp;
1153 struct hci_conn *conn;
1155 BT_DBG("%s status 0x%x", hdev->name, status);
1157 if (!status)
1158 return;
1160 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1161 if (!cp)
1162 return;
1164 hci_dev_lock(hdev);
1166 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1167 if (conn) {
1168 if (conn->state == BT_CONFIG) {
1169 hci_proto_connect_cfm(conn, status);
1170 hci_conn_put(conn);
1174 hci_dev_unlock(hdev);
1177 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1179 struct hci_cp_read_remote_ext_features *cp;
1180 struct hci_conn *conn;
1182 BT_DBG("%s status 0x%x", hdev->name, status);
1184 if (!status)
1185 return;
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1188 if (!cp)
1189 return;
1191 hci_dev_lock(hdev);
1193 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1194 if (conn) {
1195 if (conn->state == BT_CONFIG) {
1196 hci_proto_connect_cfm(conn, status);
1197 hci_conn_put(conn);
1201 hci_dev_unlock(hdev);
1204 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1206 struct hci_cp_setup_sync_conn *cp;
1207 struct hci_conn *acl, *sco;
1208 __u16 handle;
1210 BT_DBG("%s status 0x%x", hdev->name, status);
1212 if (!status)
1213 return;
1215 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1216 if (!cp)
1217 return;
1219 handle = __le16_to_cpu(cp->handle);
1221 BT_DBG("%s handle %d", hdev->name, handle);
1223 hci_dev_lock(hdev);
1225 acl = hci_conn_hash_lookup_handle(hdev, handle);
1226 if (acl) {
1227 sco = acl->link;
1228 if (sco) {
1229 sco->state = BT_CLOSED;
1231 hci_proto_connect_cfm(sco, status);
1232 hci_conn_del(sco);
1236 hci_dev_unlock(hdev);
1239 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1241 struct hci_cp_sniff_mode *cp;
1242 struct hci_conn *conn;
1244 BT_DBG("%s status 0x%x", hdev->name, status);
1246 if (!status)
1247 return;
1249 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1250 if (!cp)
1251 return;
1253 hci_dev_lock(hdev);
1255 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1256 if (conn) {
1257 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1259 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1260 hci_sco_setup(conn, status);
1263 hci_dev_unlock(hdev);
1266 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1268 struct hci_cp_exit_sniff_mode *cp;
1269 struct hci_conn *conn;
1271 BT_DBG("%s status 0x%x", hdev->name, status);
1273 if (!status)
1274 return;
1276 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1277 if (!cp)
1278 return;
1280 hci_dev_lock(hdev);
1282 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1283 if (conn) {
1284 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1286 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1287 hci_sco_setup(conn, status);
1290 hci_dev_unlock(hdev);
1293 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1295 struct hci_cp_le_create_conn *cp;
1296 struct hci_conn *conn;
1298 BT_DBG("%s status 0x%x", hdev->name, status);
1300 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1301 if (!cp)
1302 return;
1304 hci_dev_lock(hdev);
1306 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1308 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1309 conn);
1311 if (status) {
1312 if (conn && conn->state == BT_CONNECT) {
1313 conn->state = BT_CLOSED;
1314 hci_proto_connect_cfm(conn, status);
1315 hci_conn_del(conn);
1317 } else {
1318 if (!conn) {
1319 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1320 if (conn) {
1321 conn->dst_type = cp->peer_addr_type;
1322 conn->out = 1;
1323 } else {
1324 BT_ERR("No memory for new connection");
1329 hci_dev_unlock(hdev);
1332 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1334 BT_DBG("%s status 0x%x", hdev->name, status);
1337 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1339 __u8 status = *((__u8 *) skb->data);
1341 BT_DBG("%s status %d", hdev->name, status);
1343 if (test_bit(HCI_MGMT, &hdev->flags) &&
1344 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1345 mgmt_discovering(hdev->id, 0);
1347 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1349 hci_conn_check_pending(hdev);
1352 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1354 struct inquiry_data data;
1355 struct inquiry_info *info = (void *) (skb->data + 1);
1356 int num_rsp = *((__u8 *) skb->data);
1358 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1360 if (!num_rsp)
1361 return;
1363 hci_dev_lock(hdev);
1365 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
1367 if (test_bit(HCI_MGMT, &hdev->flags))
1368 mgmt_discovering(hdev->id, 1);
1371 for (; num_rsp; num_rsp--, info++) {
1372 bacpy(&data.bdaddr, &info->bdaddr);
1373 data.pscan_rep_mode = info->pscan_rep_mode;
1374 data.pscan_period_mode = info->pscan_period_mode;
1375 data.pscan_mode = info->pscan_mode;
1376 memcpy(data.dev_class, info->dev_class, 3);
1377 data.clock_offset = info->clock_offset;
1378 data.rssi = 0x00;
1379 data.ssp_mode = 0x00;
1380 hci_inquiry_cache_update(hdev, &data);
1381 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1382 NULL);
1385 hci_dev_unlock(hdev);
1388 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1390 struct hci_ev_conn_complete *ev = (void *) skb->data;
1391 struct hci_conn *conn;
1393 BT_DBG("%s", hdev->name);
1395 hci_dev_lock(hdev);
1397 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1398 if (!conn) {
1399 if (ev->link_type != SCO_LINK)
1400 goto unlock;
1402 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1403 if (!conn)
1404 goto unlock;
1406 conn->type = SCO_LINK;
1409 if (!ev->status) {
1410 conn->handle = __le16_to_cpu(ev->handle);
1412 if (conn->type == ACL_LINK) {
1413 conn->state = BT_CONFIG;
1414 hci_conn_hold(conn);
1415 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1416 mgmt_connected(hdev->id, &ev->bdaddr);
1417 } else
1418 conn->state = BT_CONNECTED;
1420 hci_conn_hold_device(conn);
1421 hci_conn_add_sysfs(conn);
1423 if (test_bit(HCI_AUTH, &hdev->flags))
1424 conn->link_mode |= HCI_LM_AUTH;
1426 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1427 conn->link_mode |= HCI_LM_ENCRYPT;
1429 /* Get remote features */
1430 if (conn->type == ACL_LINK) {
1431 struct hci_cp_read_remote_features cp;
1432 cp.handle = ev->handle;
1433 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1434 sizeof(cp), &cp);
1437 /* Set packet type for incoming connection */
1438 if (!conn->out && hdev->hci_ver < 3) {
1439 struct hci_cp_change_conn_ptype cp;
1440 cp.handle = ev->handle;
1441 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1442 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1443 sizeof(cp), &cp);
1445 } else {
1446 conn->state = BT_CLOSED;
1447 if (conn->type == ACL_LINK)
1448 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
1451 if (conn->type == ACL_LINK)
1452 hci_sco_setup(conn, ev->status);
1454 if (ev->status) {
1455 hci_proto_connect_cfm(conn, ev->status);
1456 hci_conn_del(conn);
1457 } else if (ev->link_type != ACL_LINK)
1458 hci_proto_connect_cfm(conn, ev->status);
1460 unlock:
1461 hci_dev_unlock(hdev);
1463 hci_conn_check_pending(hdev);
1466 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1468 struct hci_ev_conn_request *ev = (void *) skb->data;
1469 int mask = hdev->link_mode;
1471 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1472 batostr(&ev->bdaddr), ev->link_type);
1474 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1476 if ((mask & HCI_LM_ACCEPT) &&
1477 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1478 /* Connection accepted */
1479 struct inquiry_entry *ie;
1480 struct hci_conn *conn;
1482 hci_dev_lock(hdev);
1484 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1485 if (ie)
1486 memcpy(ie->data.dev_class, ev->dev_class, 3);
1488 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1489 if (!conn) {
1490 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1491 if (!conn) {
1492 BT_ERR("No memory for new connection");
1493 hci_dev_unlock(hdev);
1494 return;
1498 memcpy(conn->dev_class, ev->dev_class, 3);
1499 conn->state = BT_CONNECT;
1501 hci_dev_unlock(hdev);
1503 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1504 struct hci_cp_accept_conn_req cp;
1506 bacpy(&cp.bdaddr, &ev->bdaddr);
1508 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1509 cp.role = 0x00; /* Become master */
1510 else
1511 cp.role = 0x01; /* Remain slave */
1513 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1514 sizeof(cp), &cp);
1515 } else {
1516 struct hci_cp_accept_sync_conn_req cp;
1518 bacpy(&cp.bdaddr, &ev->bdaddr);
1519 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1521 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1522 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1523 cp.max_latency = cpu_to_le16(0xffff);
1524 cp.content_format = cpu_to_le16(hdev->voice_setting);
1525 cp.retrans_effort = 0xff;
1527 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1528 sizeof(cp), &cp);
1530 } else {
1531 /* Connection rejected */
1532 struct hci_cp_reject_conn_req cp;
1534 bacpy(&cp.bdaddr, &ev->bdaddr);
1535 cp.reason = 0x0f;
1536 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1540 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1542 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1543 struct hci_conn *conn;
1545 BT_DBG("%s status %d", hdev->name, ev->status);
1547 if (ev->status) {
1548 mgmt_disconnect_failed(hdev->id);
1549 return;
1552 hci_dev_lock(hdev);
1554 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1555 if (!conn)
1556 goto unlock;
1558 conn->state = BT_CLOSED;
1560 if (conn->type == ACL_LINK || conn->type == LE_LINK)
1561 mgmt_disconnected(hdev->id, &conn->dst);
1563 hci_proto_disconn_cfm(conn, ev->reason);
1564 hci_conn_del(conn);
1566 unlock:
1567 hci_dev_unlock(hdev);
1570 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1572 struct hci_ev_auth_complete *ev = (void *) skb->data;
1573 struct hci_conn *conn;
1575 BT_DBG("%s status %d", hdev->name, ev->status);
1577 hci_dev_lock(hdev);
1579 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1580 if (!conn)
1581 goto unlock;
1583 if (!ev->status) {
1584 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
1585 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
1586 BT_INFO("re-auth of legacy device is not possible.");
1587 } else {
1588 conn->link_mode |= HCI_LM_AUTH;
1589 conn->sec_level = conn->pending_sec_level;
1591 } else {
1592 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1595 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1596 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
1598 if (conn->state == BT_CONFIG) {
1599 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
1600 struct hci_cp_set_conn_encrypt cp;
1601 cp.handle = ev->handle;
1602 cp.encrypt = 0x01;
1603 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1604 &cp);
1605 } else {
1606 conn->state = BT_CONNECTED;
1607 hci_proto_connect_cfm(conn, ev->status);
1608 hci_conn_put(conn);
1610 } else {
1611 hci_auth_cfm(conn, ev->status);
1613 hci_conn_hold(conn);
1614 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1615 hci_conn_put(conn);
1618 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1619 if (!ev->status) {
1620 struct hci_cp_set_conn_encrypt cp;
1621 cp.handle = ev->handle;
1622 cp.encrypt = 0x01;
1623 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1624 &cp);
1625 } else {
1626 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1627 hci_encrypt_cfm(conn, ev->status, 0x00);
1631 unlock:
1632 hci_dev_unlock(hdev);
1635 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1637 struct hci_ev_remote_name *ev = (void *) skb->data;
1638 struct hci_conn *conn;
1640 BT_DBG("%s", hdev->name);
1642 hci_conn_check_pending(hdev);
1644 hci_dev_lock(hdev);
1646 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1647 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1649 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1650 if (!conn)
1651 goto unlock;
1653 if (!hci_outgoing_auth_needed(hdev, conn))
1654 goto unlock;
1656 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1657 struct hci_cp_auth_requested cp;
1658 cp.handle = __cpu_to_le16(conn->handle);
1659 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1662 unlock:
1663 hci_dev_unlock(hdev);
1666 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1668 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1669 struct hci_conn *conn;
1671 BT_DBG("%s status %d", hdev->name, ev->status);
1673 hci_dev_lock(hdev);
1675 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1676 if (conn) {
1677 if (!ev->status) {
1678 if (ev->encrypt) {
1679 /* Encryption implies authentication */
1680 conn->link_mode |= HCI_LM_AUTH;
1681 conn->link_mode |= HCI_LM_ENCRYPT;
1682 conn->sec_level = conn->pending_sec_level;
1683 } else
1684 conn->link_mode &= ~HCI_LM_ENCRYPT;
1687 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1689 if (conn->state == BT_CONFIG) {
1690 if (!ev->status)
1691 conn->state = BT_CONNECTED;
1693 hci_proto_connect_cfm(conn, ev->status);
1694 hci_conn_put(conn);
1695 } else
1696 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1699 hci_dev_unlock(hdev);
1702 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1704 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1705 struct hci_conn *conn;
1707 BT_DBG("%s status %d", hdev->name, ev->status);
1709 hci_dev_lock(hdev);
1711 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1712 if (conn) {
1713 if (!ev->status)
1714 conn->link_mode |= HCI_LM_SECURE;
1716 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1718 hci_key_change_cfm(conn, ev->status);
1721 hci_dev_unlock(hdev);
1724 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1726 struct hci_ev_remote_features *ev = (void *) skb->data;
1727 struct hci_conn *conn;
1729 BT_DBG("%s status %d", hdev->name, ev->status);
1731 hci_dev_lock(hdev);
1733 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1734 if (!conn)
1735 goto unlock;
1737 if (!ev->status)
1738 memcpy(conn->features, ev->features, 8);
1740 if (conn->state != BT_CONFIG)
1741 goto unlock;
1743 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1744 struct hci_cp_read_remote_ext_features cp;
1745 cp.handle = ev->handle;
1746 cp.page = 0x01;
1747 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1748 sizeof(cp), &cp);
1749 goto unlock;
1752 if (!ev->status) {
1753 struct hci_cp_remote_name_req cp;
1754 memset(&cp, 0, sizeof(cp));
1755 bacpy(&cp.bdaddr, &conn->dst);
1756 cp.pscan_rep_mode = 0x02;
1757 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1760 if (!hci_outgoing_auth_needed(hdev, conn)) {
1761 conn->state = BT_CONNECTED;
1762 hci_proto_connect_cfm(conn, ev->status);
1763 hci_conn_put(conn);
1766 unlock:
1767 hci_dev_unlock(hdev);
1770 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1772 BT_DBG("%s", hdev->name);
1775 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1777 BT_DBG("%s", hdev->name);
1780 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1782 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1783 __u16 opcode;
1785 skb_pull(skb, sizeof(*ev));
1787 opcode = __le16_to_cpu(ev->opcode);
1789 switch (opcode) {
1790 case HCI_OP_INQUIRY_CANCEL:
1791 hci_cc_inquiry_cancel(hdev, skb);
1792 break;
1794 case HCI_OP_EXIT_PERIODIC_INQ:
1795 hci_cc_exit_periodic_inq(hdev, skb);
1796 break;
1798 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1799 hci_cc_remote_name_req_cancel(hdev, skb);
1800 break;
1802 case HCI_OP_ROLE_DISCOVERY:
1803 hci_cc_role_discovery(hdev, skb);
1804 break;
1806 case HCI_OP_READ_LINK_POLICY:
1807 hci_cc_read_link_policy(hdev, skb);
1808 break;
1810 case HCI_OP_WRITE_LINK_POLICY:
1811 hci_cc_write_link_policy(hdev, skb);
1812 break;
1814 case HCI_OP_READ_DEF_LINK_POLICY:
1815 hci_cc_read_def_link_policy(hdev, skb);
1816 break;
1818 case HCI_OP_WRITE_DEF_LINK_POLICY:
1819 hci_cc_write_def_link_policy(hdev, skb);
1820 break;
1822 case HCI_OP_RESET:
1823 hci_cc_reset(hdev, skb);
1824 break;
1826 case HCI_OP_WRITE_LOCAL_NAME:
1827 hci_cc_write_local_name(hdev, skb);
1828 break;
1830 case HCI_OP_READ_LOCAL_NAME:
1831 hci_cc_read_local_name(hdev, skb);
1832 break;
1834 case HCI_OP_WRITE_AUTH_ENABLE:
1835 hci_cc_write_auth_enable(hdev, skb);
1836 break;
1838 case HCI_OP_WRITE_ENCRYPT_MODE:
1839 hci_cc_write_encrypt_mode(hdev, skb);
1840 break;
1842 case HCI_OP_WRITE_SCAN_ENABLE:
1843 hci_cc_write_scan_enable(hdev, skb);
1844 break;
1846 case HCI_OP_READ_CLASS_OF_DEV:
1847 hci_cc_read_class_of_dev(hdev, skb);
1848 break;
1850 case HCI_OP_WRITE_CLASS_OF_DEV:
1851 hci_cc_write_class_of_dev(hdev, skb);
1852 break;
1854 case HCI_OP_READ_VOICE_SETTING:
1855 hci_cc_read_voice_setting(hdev, skb);
1856 break;
1858 case HCI_OP_WRITE_VOICE_SETTING:
1859 hci_cc_write_voice_setting(hdev, skb);
1860 break;
1862 case HCI_OP_HOST_BUFFER_SIZE:
1863 hci_cc_host_buffer_size(hdev, skb);
1864 break;
1866 case HCI_OP_READ_SSP_MODE:
1867 hci_cc_read_ssp_mode(hdev, skb);
1868 break;
1870 case HCI_OP_WRITE_SSP_MODE:
1871 hci_cc_write_ssp_mode(hdev, skb);
1872 break;
1874 case HCI_OP_READ_LOCAL_VERSION:
1875 hci_cc_read_local_version(hdev, skb);
1876 break;
1878 case HCI_OP_READ_LOCAL_COMMANDS:
1879 hci_cc_read_local_commands(hdev, skb);
1880 break;
1882 case HCI_OP_READ_LOCAL_FEATURES:
1883 hci_cc_read_local_features(hdev, skb);
1884 break;
1886 case HCI_OP_READ_LOCAL_EXT_FEATURES:
1887 hci_cc_read_local_ext_features(hdev, skb);
1888 break;
1890 case HCI_OP_READ_BUFFER_SIZE:
1891 hci_cc_read_buffer_size(hdev, skb);
1892 break;
1894 case HCI_OP_READ_BD_ADDR:
1895 hci_cc_read_bd_addr(hdev, skb);
1896 break;
1898 case HCI_OP_WRITE_CA_TIMEOUT:
1899 hci_cc_write_ca_timeout(hdev, skb);
1900 break;
1902 case HCI_OP_DELETE_STORED_LINK_KEY:
1903 hci_cc_delete_stored_link_key(hdev, skb);
1904 break;
1906 case HCI_OP_SET_EVENT_MASK:
1907 hci_cc_set_event_mask(hdev, skb);
1908 break;
1910 case HCI_OP_WRITE_INQUIRY_MODE:
1911 hci_cc_write_inquiry_mode(hdev, skb);
1912 break;
1914 case HCI_OP_READ_INQ_RSP_TX_POWER:
1915 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1916 break;
1918 case HCI_OP_SET_EVENT_FLT:
1919 hci_cc_set_event_flt(hdev, skb);
1920 break;
1922 case HCI_OP_PIN_CODE_REPLY:
1923 hci_cc_pin_code_reply(hdev, skb);
1924 break;
1926 case HCI_OP_PIN_CODE_NEG_REPLY:
1927 hci_cc_pin_code_neg_reply(hdev, skb);
1928 break;
1930 case HCI_OP_READ_LOCAL_OOB_DATA:
1931 hci_cc_read_local_oob_data_reply(hdev, skb);
1932 break;
1934 case HCI_OP_LE_READ_BUFFER_SIZE:
1935 hci_cc_le_read_buffer_size(hdev, skb);
1936 break;
1938 case HCI_OP_USER_CONFIRM_REPLY:
1939 hci_cc_user_confirm_reply(hdev, skb);
1940 break;
1942 case HCI_OP_USER_CONFIRM_NEG_REPLY:
1943 hci_cc_user_confirm_neg_reply(hdev, skb);
1944 break;
1946 case HCI_OP_LE_SET_SCAN_ENABLE:
1947 hci_cc_le_set_scan_enable(hdev, skb);
1948 break;
1950 case HCI_OP_LE_LTK_REPLY:
1951 hci_cc_le_ltk_reply(hdev, skb);
1952 break;
1954 case HCI_OP_LE_LTK_NEG_REPLY:
1955 hci_cc_le_ltk_neg_reply(hdev, skb);
1956 break;
1958 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
1959 hci_cc_write_le_host_supported(hdev, skb);
1960 break;
1962 default:
1963 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1964 break;
1967 if (ev->opcode != HCI_OP_NOP)
1968 del_timer(&hdev->cmd_timer);
1970 if (ev->ncmd) {
1971 atomic_set(&hdev->cmd_cnt, 1);
1972 if (!skb_queue_empty(&hdev->cmd_q))
1973 tasklet_schedule(&hdev->cmd_task);
1977 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1979 struct hci_ev_cmd_status *ev = (void *) skb->data;
1980 __u16 opcode;
1982 skb_pull(skb, sizeof(*ev));
1984 opcode = __le16_to_cpu(ev->opcode);
1986 switch (opcode) {
1987 case HCI_OP_INQUIRY:
1988 hci_cs_inquiry(hdev, ev->status);
1989 break;
1991 case HCI_OP_CREATE_CONN:
1992 hci_cs_create_conn(hdev, ev->status);
1993 break;
1995 case HCI_OP_ADD_SCO:
1996 hci_cs_add_sco(hdev, ev->status);
1997 break;
1999 case HCI_OP_AUTH_REQUESTED:
2000 hci_cs_auth_requested(hdev, ev->status);
2001 break;
2003 case HCI_OP_SET_CONN_ENCRYPT:
2004 hci_cs_set_conn_encrypt(hdev, ev->status);
2005 break;
2007 case HCI_OP_REMOTE_NAME_REQ:
2008 hci_cs_remote_name_req(hdev, ev->status);
2009 break;
2011 case HCI_OP_READ_REMOTE_FEATURES:
2012 hci_cs_read_remote_features(hdev, ev->status);
2013 break;
2015 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2016 hci_cs_read_remote_ext_features(hdev, ev->status);
2017 break;
2019 case HCI_OP_SETUP_SYNC_CONN:
2020 hci_cs_setup_sync_conn(hdev, ev->status);
2021 break;
2023 case HCI_OP_SNIFF_MODE:
2024 hci_cs_sniff_mode(hdev, ev->status);
2025 break;
2027 case HCI_OP_EXIT_SNIFF_MODE:
2028 hci_cs_exit_sniff_mode(hdev, ev->status);
2029 break;
2031 case HCI_OP_DISCONNECT:
2032 if (ev->status != 0)
2033 mgmt_disconnect_failed(hdev->id);
2034 break;
2036 case HCI_OP_LE_CREATE_CONN:
2037 hci_cs_le_create_conn(hdev, ev->status);
2038 break;
2040 case HCI_OP_LE_START_ENC:
2041 hci_cs_le_start_enc(hdev, ev->status);
2042 break;
2044 default:
2045 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2046 break;
2049 if (ev->opcode != HCI_OP_NOP)
2050 del_timer(&hdev->cmd_timer);
2052 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2053 atomic_set(&hdev->cmd_cnt, 1);
2054 if (!skb_queue_empty(&hdev->cmd_q))
2055 tasklet_schedule(&hdev->cmd_task);
2059 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2061 struct hci_ev_role_change *ev = (void *) skb->data;
2062 struct hci_conn *conn;
2064 BT_DBG("%s status %d", hdev->name, ev->status);
2066 hci_dev_lock(hdev);
2068 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2069 if (conn) {
2070 if (!ev->status) {
2071 if (ev->role)
2072 conn->link_mode &= ~HCI_LM_MASTER;
2073 else
2074 conn->link_mode |= HCI_LM_MASTER;
2077 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
2079 hci_role_switch_cfm(conn, ev->status, ev->role);
2082 hci_dev_unlock(hdev);
2085 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2087 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2088 __le16 *ptr;
2089 int i;
2091 skb_pull(skb, sizeof(*ev));
2093 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2095 if (skb->len < ev->num_hndl * 4) {
2096 BT_DBG("%s bad parameters", hdev->name);
2097 return;
2100 tasklet_disable(&hdev->tx_task);
2102 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
2103 struct hci_conn *conn;
2104 __u16 handle, count;
2106 handle = get_unaligned_le16(ptr++);
2107 count = get_unaligned_le16(ptr++);
2109 conn = hci_conn_hash_lookup_handle(hdev, handle);
2110 if (conn) {
2111 conn->sent -= count;
2113 if (conn->type == ACL_LINK) {
2114 hdev->acl_cnt += count;
2115 if (hdev->acl_cnt > hdev->acl_pkts)
2116 hdev->acl_cnt = hdev->acl_pkts;
2117 } else if (conn->type == LE_LINK) {
2118 if (hdev->le_pkts) {
2119 hdev->le_cnt += count;
2120 if (hdev->le_cnt > hdev->le_pkts)
2121 hdev->le_cnt = hdev->le_pkts;
2122 } else {
2123 hdev->acl_cnt += count;
2124 if (hdev->acl_cnt > hdev->acl_pkts)
2125 hdev->acl_cnt = hdev->acl_pkts;
2127 } else {
2128 hdev->sco_cnt += count;
2129 if (hdev->sco_cnt > hdev->sco_pkts)
2130 hdev->sco_cnt = hdev->sco_pkts;
2135 tasklet_schedule(&hdev->tx_task);
2137 tasklet_enable(&hdev->tx_task);
2140 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2142 struct hci_ev_mode_change *ev = (void *) skb->data;
2143 struct hci_conn *conn;
2145 BT_DBG("%s status %d", hdev->name, ev->status);
2147 hci_dev_lock(hdev);
2149 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2150 if (conn) {
2151 conn->mode = ev->mode;
2152 conn->interval = __le16_to_cpu(ev->interval);
2154 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2155 if (conn->mode == HCI_CM_ACTIVE)
2156 conn->power_save = 1;
2157 else
2158 conn->power_save = 0;
2161 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2162 hci_sco_setup(conn, ev->status);
2165 hci_dev_unlock(hdev);
2168 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2170 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2171 struct hci_conn *conn;
2173 BT_DBG("%s", hdev->name);
2175 hci_dev_lock(hdev);
2177 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2178 if (conn && conn->state == BT_CONNECTED) {
2179 hci_conn_hold(conn);
2180 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2181 hci_conn_put(conn);
2184 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2185 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2186 sizeof(ev->bdaddr), &ev->bdaddr);
2187 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2188 u8 secure;
2190 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2191 secure = 1;
2192 else
2193 secure = 0;
2195 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2198 hci_dev_unlock(hdev);
2201 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2203 struct hci_ev_link_key_req *ev = (void *) skb->data;
2204 struct hci_cp_link_key_reply cp;
2205 struct hci_conn *conn;
2206 struct link_key *key;
2208 BT_DBG("%s", hdev->name);
2210 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2211 return;
2213 hci_dev_lock(hdev);
2215 key = hci_find_link_key(hdev, &ev->bdaddr);
2216 if (!key) {
2217 BT_DBG("%s link key not found for %s", hdev->name,
2218 batostr(&ev->bdaddr));
2219 goto not_found;
2222 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2223 batostr(&ev->bdaddr));
2225 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2226 key->type == HCI_LK_DEBUG_COMBINATION) {
2227 BT_DBG("%s ignoring debug key", hdev->name);
2228 goto not_found;
2231 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2232 if (conn) {
2233 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2234 conn->auth_type != 0xff &&
2235 (conn->auth_type & 0x01)) {
2236 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2237 goto not_found;
2240 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2241 conn->pending_sec_level == BT_SECURITY_HIGH) {
2242 BT_DBG("%s ignoring key unauthenticated for high \
2243 security", hdev->name);
2244 goto not_found;
2247 conn->key_type = key->type;
2248 conn->pin_length = key->pin_len;
2251 bacpy(&cp.bdaddr, &ev->bdaddr);
2252 memcpy(cp.link_key, key->val, 16);
2254 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2256 hci_dev_unlock(hdev);
2258 return;
2260 not_found:
2261 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2262 hci_dev_unlock(hdev);
2265 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2267 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2268 struct hci_conn *conn;
2269 u8 pin_len = 0;
2271 BT_DBG("%s", hdev->name);
2273 hci_dev_lock(hdev);
2275 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2276 if (conn) {
2277 hci_conn_hold(conn);
2278 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2279 pin_len = conn->pin_length;
2281 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2282 conn->key_type = ev->key_type;
2284 hci_conn_put(conn);
2287 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2288 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2289 ev->key_type, pin_len);
2291 hci_dev_unlock(hdev);
2294 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2296 struct hci_ev_clock_offset *ev = (void *) skb->data;
2297 struct hci_conn *conn;
2299 BT_DBG("%s status %d", hdev->name, ev->status);
2301 hci_dev_lock(hdev);
2303 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2304 if (conn && !ev->status) {
2305 struct inquiry_entry *ie;
2307 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2308 if (ie) {
2309 ie->data.clock_offset = ev->clock_offset;
2310 ie->timestamp = jiffies;
2314 hci_dev_unlock(hdev);
2317 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2319 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2320 struct hci_conn *conn;
2322 BT_DBG("%s status %d", hdev->name, ev->status);
2324 hci_dev_lock(hdev);
2326 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2327 if (conn && !ev->status)
2328 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2330 hci_dev_unlock(hdev);
2333 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2335 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2336 struct inquiry_entry *ie;
2338 BT_DBG("%s", hdev->name);
2340 hci_dev_lock(hdev);
2342 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2343 if (ie) {
2344 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2345 ie->timestamp = jiffies;
2348 hci_dev_unlock(hdev);
2351 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2353 struct inquiry_data data;
2354 int num_rsp = *((__u8 *) skb->data);
2356 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2358 if (!num_rsp)
2359 return;
2361 hci_dev_lock(hdev);
2363 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2365 if (test_bit(HCI_MGMT, &hdev->flags))
2366 mgmt_discovering(hdev->id, 1);
2369 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2370 struct inquiry_info_with_rssi_and_pscan_mode *info;
2371 info = (void *) (skb->data + 1);
2373 for (; num_rsp; num_rsp--, info++) {
2374 bacpy(&data.bdaddr, &info->bdaddr);
2375 data.pscan_rep_mode = info->pscan_rep_mode;
2376 data.pscan_period_mode = info->pscan_period_mode;
2377 data.pscan_mode = info->pscan_mode;
2378 memcpy(data.dev_class, info->dev_class, 3);
2379 data.clock_offset = info->clock_offset;
2380 data.rssi = info->rssi;
2381 data.ssp_mode = 0x00;
2382 hci_inquiry_cache_update(hdev, &data);
2383 mgmt_device_found(hdev->id, &info->bdaddr,
2384 info->dev_class, info->rssi,
2385 NULL);
2387 } else {
2388 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2390 for (; num_rsp; num_rsp--, info++) {
2391 bacpy(&data.bdaddr, &info->bdaddr);
2392 data.pscan_rep_mode = info->pscan_rep_mode;
2393 data.pscan_period_mode = info->pscan_period_mode;
2394 data.pscan_mode = 0x00;
2395 memcpy(data.dev_class, info->dev_class, 3);
2396 data.clock_offset = info->clock_offset;
2397 data.rssi = info->rssi;
2398 data.ssp_mode = 0x00;
2399 hci_inquiry_cache_update(hdev, &data);
2400 mgmt_device_found(hdev->id, &info->bdaddr,
2401 info->dev_class, info->rssi,
2402 NULL);
2406 hci_dev_unlock(hdev);
2409 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2411 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2412 struct hci_conn *conn;
2414 BT_DBG("%s", hdev->name);
2416 hci_dev_lock(hdev);
2418 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2419 if (!conn)
2420 goto unlock;
2422 if (!ev->status && ev->page == 0x01) {
2423 struct inquiry_entry *ie;
2425 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2426 if (ie)
2427 ie->data.ssp_mode = (ev->features[0] & 0x01);
2429 conn->ssp_mode = (ev->features[0] & 0x01);
2432 if (conn->state != BT_CONFIG)
2433 goto unlock;
2435 if (!ev->status) {
2436 struct hci_cp_remote_name_req cp;
2437 memset(&cp, 0, sizeof(cp));
2438 bacpy(&cp.bdaddr, &conn->dst);
2439 cp.pscan_rep_mode = 0x02;
2440 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2443 if (!hci_outgoing_auth_needed(hdev, conn)) {
2444 conn->state = BT_CONNECTED;
2445 hci_proto_connect_cfm(conn, ev->status);
2446 hci_conn_put(conn);
2449 unlock:
2450 hci_dev_unlock(hdev);
2453 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2455 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2456 struct hci_conn *conn;
2458 BT_DBG("%s status %d", hdev->name, ev->status);
2460 hci_dev_lock(hdev);
2462 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2463 if (!conn) {
2464 if (ev->link_type == ESCO_LINK)
2465 goto unlock;
2467 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2468 if (!conn)
2469 goto unlock;
2471 conn->type = SCO_LINK;
2474 switch (ev->status) {
2475 case 0x00:
2476 conn->handle = __le16_to_cpu(ev->handle);
2477 conn->state = BT_CONNECTED;
2479 hci_conn_hold_device(conn);
2480 hci_conn_add_sysfs(conn);
2481 break;
2483 case 0x11: /* Unsupported Feature or Parameter Value */
2484 case 0x1c: /* SCO interval rejected */
2485 case 0x1a: /* Unsupported Remote Feature */
2486 case 0x1f: /* Unspecified error */
2487 if (conn->out && conn->attempt < 2) {
2488 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2489 (hdev->esco_type & EDR_ESCO_MASK);
2490 hci_setup_sync(conn, conn->link->handle);
2491 goto unlock;
2493 /* fall through */
2495 default:
2496 conn->state = BT_CLOSED;
2497 break;
2500 hci_proto_connect_cfm(conn, ev->status);
2501 if (ev->status)
2502 hci_conn_del(conn);
2504 unlock:
2505 hci_dev_unlock(hdev);
2508 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2510 BT_DBG("%s", hdev->name);
2513 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2515 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2517 BT_DBG("%s status %d", hdev->name, ev->status);
2520 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2522 struct inquiry_data data;
2523 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2524 int num_rsp = *((__u8 *) skb->data);
2526 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2528 if (!num_rsp)
2529 return;
2531 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2533 if (test_bit(HCI_MGMT, &hdev->flags))
2534 mgmt_discovering(hdev->id, 1);
2537 hci_dev_lock(hdev);
2539 for (; num_rsp; num_rsp--, info++) {
2540 bacpy(&data.bdaddr, &info->bdaddr);
2541 data.pscan_rep_mode = info->pscan_rep_mode;
2542 data.pscan_period_mode = info->pscan_period_mode;
2543 data.pscan_mode = 0x00;
2544 memcpy(data.dev_class, info->dev_class, 3);
2545 data.clock_offset = info->clock_offset;
2546 data.rssi = info->rssi;
2547 data.ssp_mode = 0x01;
2548 hci_inquiry_cache_update(hdev, &data);
2549 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2550 info->rssi, info->data);
2553 hci_dev_unlock(hdev);
2556 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2558 /* If remote requests dedicated bonding follow that lead */
2559 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2560 /* If both remote and local IO capabilities allow MITM
2561 * protection then require it, otherwise don't */
2562 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2563 return 0x02;
2564 else
2565 return 0x03;
2568 /* If remote requests no-bonding follow that lead */
2569 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2570 return conn->remote_auth | (conn->auth_type & 0x01);
2572 return conn->auth_type;
/*
 * IO Capability Request event: the controller asks how we want to pair
 * (Secure Simple Pairing). Reply with our IO capability and auth
 * requirements, or reject pairing outright.
 */
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection referenced for the duration of pairing;
	 * the matching put happens in hci_simple_pair_complete_evt. */
	hci_conn_hold(conn);

	/* Without the management interface we don't answer here at all. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	/* Accept when we are pairable, or when the remote asks for
	 * no-bonding (bonding bits masked off, MITM bit ignored). */
	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when a side signalled OOB and
		 * we actually have stored OOB data for this peer. */
		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x18; /* Pairing not allowed */

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2624 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2626 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2627 struct hci_conn *conn;
2629 BT_DBG("%s", hdev->name);
2631 hci_dev_lock(hdev);
2633 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2634 if (!conn)
2635 goto unlock;
2637 conn->remote_cap = ev->capability;
2638 conn->remote_oob = ev->oob_data;
2639 conn->remote_auth = ev->authentication;
2641 unlock:
2642 hci_dev_unlock(hdev);
/*
 * User Confirmation Request event (SSP numeric comparison /
 * just-works). Decide whether to reject, auto-accept locally, or
 * forward the request to user space via mgmt.
 */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		/* Optionally delay the accept so user space can still
		 * intervene; the timer sends the reply when it fires. */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
								confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
/*
 * Simple Pairing Complete event: report pairing failures to mgmt and
 * drop the connection reference taken when pairing started.
 */
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);

	/* Matches the hci_conn_hold() in hci_io_capa_request_evt(). */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
2740 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2742 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2743 struct inquiry_entry *ie;
2745 BT_DBG("%s", hdev->name);
2747 hci_dev_lock(hdev);
2749 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2750 if (ie)
2751 ie->data.ssp_mode = (ev->features[0] & 0x01);
2753 hci_dev_unlock(hdev);
2756 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2757 struct sk_buff *skb)
2759 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2760 struct oob_data *data;
2762 BT_DBG("%s", hdev->name);
2764 hci_dev_lock(hdev);
2766 if (!test_bit(HCI_MGMT, &hdev->flags))
2767 goto unlock;
2769 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2770 if (data) {
2771 struct hci_cp_remote_oob_data_reply cp;
2773 bacpy(&cp.bdaddr, &ev->bdaddr);
2774 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2775 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2777 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2778 &cp);
2779 } else {
2780 struct hci_cp_remote_oob_data_neg_reply cp;
2782 bacpy(&cp.bdaddr, &ev->bdaddr);
2783 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2784 &cp);
2787 unlock:
2788 hci_dev_unlock(hdev);
/*
 * LE Connection Complete event: finish setting up an LE link, or tear
 * it down and report the failure when the event carries an error
 * status.
 */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* No hci_conn exists yet for incoming connections; create one. */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	/* On failure: notify mgmt and the protocol layer, then destroy
	 * the connection object. */
	if (ev->status) {
		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev->id, &ev->bdaddr);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
2835 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
2836 struct sk_buff *skb)
2838 struct hci_ev_le_advertising_info *ev;
2839 u8 num_reports;
2841 num_reports = skb->data[0];
2842 ev = (void *) &skb->data[1];
2844 hci_dev_lock(hdev);
2846 hci_add_adv_entry(hdev, ev);
2848 while (--num_reports) {
2849 ev = (void *) (ev->data + ev->length + 1);
2850 hci_add_adv_entry(hdev, ev);
2853 hci_dev_unlock(hdev);
2856 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
2857 struct sk_buff *skb)
2859 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
2860 struct hci_cp_le_ltk_reply cp;
2861 struct hci_cp_le_ltk_neg_reply neg;
2862 struct hci_conn *conn;
2863 struct link_key *ltk;
2865 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
2867 hci_dev_lock(hdev);
2869 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2870 if (conn == NULL)
2871 goto not_found;
2873 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
2874 if (ltk == NULL)
2875 goto not_found;
2877 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
2878 cp.handle = cpu_to_le16(conn->handle);
2879 conn->pin_length = ltk->pin_len;
2881 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
2883 hci_dev_unlock(hdev);
2885 return;
2887 not_found:
2888 neg.handle = ev->handle;
2889 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
2890 hci_dev_unlock(hdev);
2893 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
2895 struct hci_ev_le_meta *le_ev = (void *) skb->data;
2897 skb_pull(skb, sizeof(*le_ev));
2899 switch (le_ev->subevent) {
2900 case HCI_EV_LE_CONN_COMPLETE:
2901 hci_le_conn_complete_evt(hdev, skb);
2902 break;
2904 case HCI_EV_LE_ADVERTISING_REPORT:
2905 hci_le_adv_report_evt(hdev, skb);
2906 break;
2908 case HCI_EV_LE_LTK_REQ:
2909 hci_le_ltk_request_evt(hdev, skb);
2910 break;
2912 default:
2913 break;
/*
 * Main HCI event dispatcher: called for every HCI_EVENT_PKT received
 * from the driver. Strips the event header, routes the remaining
 * payload to the per-event handler, then frees the skb and bumps the
 * RX event counter.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	/* Handlers expect skb->data to point at the event payload. */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and dropped. */
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
/*
 * Generate internal stack event: build a synthetic
 * HCI_EV_STACK_INTERNAL event carrying @dlen bytes of @data and
 * deliver it to listening HCI sockets only — it is never sent to the
 * driver. Silently does nothing if the skb allocation fails.
 */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	/* NOTE(review): GFP_ATOMIC suggests callers may be in atomic
	 * context — confirm against the call sites. */
	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming and timestamp so socket filters and
	 * timestamping treat it like a real controller event. */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}
/* Read-only module parameter (0444): LE support can only be chosen at
 * module load time.
 * NOTE(review): the backing variable is declared "static int" at the
 * top of this file while the parameter type is bool — newer kernels
 * require a real bool variable for bool parameters; confirm. */
module_param(enable_le, bool, 0444);
MODULE_PARM_DESC(enable_le, "Enable LE support");