/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
68 /* ---- HCI debugfs entries ---- */
70 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
71 size_t count, loff_t *ppos)
73 struct hci_dev *hdev = file->private_data;
74 char buf[3];
76 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
77 buf[1] = '\n';
78 buf[2] = '\0';
79 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
82 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
83 size_t count, loff_t *ppos)
85 struct hci_dev *hdev = file->private_data;
86 struct sk_buff *skb;
87 char buf[32];
88 size_t buf_size = min(count, (sizeof(buf)-1));
89 bool enable;
91 if (!test_bit(HCI_UP, &hdev->flags))
92 return -ENETDOWN;
94 if (copy_from_user(buf, user_buf, buf_size))
95 return -EFAULT;
97 buf[buf_size] = '\0';
98 if (strtobool(buf, &enable))
99 return -EINVAL;
101 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
102 return -EALREADY;
104 hci_req_lock(hdev);
105 if (enable)
106 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 else
109 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
110 HCI_CMD_TIMEOUT);
111 hci_req_unlock(hdev);
113 if (IS_ERR(skb))
114 return PTR_ERR(skb);
116 kfree_skb(skb);
118 hci_dev_change_flag(hdev, HCI_DUT_MODE);
120 return count;
123 static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
130 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos)
133 struct hci_dev *hdev = file->private_data;
134 char buf[3];
136 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
137 buf[1] = '\n';
138 buf[2] = '\0';
139 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
142 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
143 size_t count, loff_t *ppos)
145 struct hci_dev *hdev = file->private_data;
146 char buf[32];
147 size_t buf_size = min(count, (sizeof(buf)-1));
148 bool enable;
149 int err;
151 if (copy_from_user(buf, user_buf, buf_size))
152 return -EFAULT;
154 buf[buf_size] = '\0';
155 if (strtobool(buf, &enable))
156 return -EINVAL;
/* When the diagnostic flags are not persistent and the transport
 * is not active, then there is no need for the vendor callback.
 *
 * Instead just store the desired value. If needed, the setting
 * will be programmed when the controller gets powered on.
 */
164 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
165 !test_bit(HCI_RUNNING, &hdev->flags))
166 goto done;
168 hci_req_lock(hdev);
169 err = hdev->set_diag(hdev, enable);
170 hci_req_unlock(hdev);
172 if (err < 0)
173 return err;
175 done:
176 if (enable)
177 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
178 else
179 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
181 return count;
184 static const struct file_operations vendor_diag_fops = {
185 .open = simple_open,
186 .read = vendor_diag_read,
187 .write = vendor_diag_write,
188 .llseek = default_llseek,
191 static void hci_debugfs_create_basic(struct hci_dev *hdev)
193 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
194 &dut_mode_fops);
196 if (hdev->set_diag)
197 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
198 &vendor_diag_fops);
201 /* ---- HCI requests ---- */
203 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
204 struct sk_buff *skb)
206 BT_DBG("%s result 0x%2.2x", hdev->name, result);
208 if (hdev->req_status == HCI_REQ_PEND) {
209 hdev->req_result = result;
210 hdev->req_status = HCI_REQ_DONE;
211 if (skb)
212 hdev->req_skb = skb_get(skb);
213 wake_up_interruptible(&hdev->req_wait_q);
217 static void hci_req_cancel(struct hci_dev *hdev, int err)
219 BT_DBG("%s err 0x%2.2x", hdev->name, err);
221 if (hdev->req_status == HCI_REQ_PEND) {
222 hdev->req_result = err;
223 hdev->req_status = HCI_REQ_CANCELED;
224 wake_up_interruptible(&hdev->req_wait_q);
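/* Send a single HCI command and wait synchronously for its completion.
 * On success the skb of the resulting Command Complete (or the requested
 * event) is returned; the caller owns the skb and must kfree_skb() it.
 */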
228 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
229 const void *param, u8 event, u32 timeout)
231 DECLARE_WAITQUEUE(wait, current);
232 struct hci_request req;
233 struct sk_buff *skb;
234 int err = 0;
236 BT_DBG("%s", hdev->name);
238 hci_req_init(&req, hdev);
240 hci_req_add_ev(&req, opcode, plen, param, event);
242 hdev->req_status = HCI_REQ_PEND;
244 add_wait_queue(&hdev->req_wait_q, &wait);
245 set_current_state(TASK_INTERRUPTIBLE);
247 err = hci_req_run_skb(&req, hci_req_sync_complete);
248 if (err < 0) {
249 remove_wait_queue(&hdev->req_wait_q, &wait);
250 set_current_state(TASK_RUNNING);
251 return ERR_PTR(err);
254 schedule_timeout(timeout);
256 remove_wait_queue(&hdev->req_wait_q, &wait);
258 if (signal_pending(current))
259 return ERR_PTR(-EINTR);
261 switch (hdev->req_status) {
262 case HCI_REQ_DONE:
263 err = -bt_to_errno(hdev->req_result);
264 break;
266 case HCI_REQ_CANCELED:
267 err = -hdev->req_result;
268 break;
270 default:
271 err = -ETIMEDOUT;
272 break;
275 hdev->req_status = hdev->req_result = 0;
276 skb = hdev->req_skb;
277 hdev->req_skb = NULL;
279 BT_DBG("%s end: err %d", hdev->name, err);
281 if (err < 0) {
282 kfree_skb(skb);
283 return ERR_PTR(err);
286 if (!skb)
287 return ERR_PTR(-ENODATA);
289 return skb;
291 EXPORT_SYMBOL(__hci_cmd_sync_ev);
293 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
294 const void *param, u32 timeout)
296 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
298 EXPORT_SYMBOL(__hci_cmd_sync);
300 /* Execute request and wait for completion. */
301 static int __hci_req_sync(struct hci_dev *hdev,
302 void (*func)(struct hci_request *req,
303 unsigned long opt),
304 unsigned long opt, __u32 timeout)
306 struct hci_request req;
307 DECLARE_WAITQUEUE(wait, current);
308 int err = 0;
310 BT_DBG("%s start", hdev->name);
312 hci_req_init(&req, hdev);
314 hdev->req_status = HCI_REQ_PEND;
316 func(&req, opt);
318 add_wait_queue(&hdev->req_wait_q, &wait);
319 set_current_state(TASK_INTERRUPTIBLE);
321 err = hci_req_run_skb(&req, hci_req_sync_complete);
322 if (err < 0) {
323 hdev->req_status = 0;
325 remove_wait_queue(&hdev->req_wait_q, &wait);
326 set_current_state(TASK_RUNNING);
328 /* ENODATA means the HCI request command queue is empty.
329 * This can happen when a request with conditionals doesn't
330 * trigger any commands to be sent. This is normal behavior
331 * and should not trigger an error return.
333 if (err == -ENODATA)
334 return 0;
336 return err;
339 schedule_timeout(timeout);
341 remove_wait_queue(&hdev->req_wait_q, &wait);
343 if (signal_pending(current))
344 return -EINTR;
346 switch (hdev->req_status) {
347 case HCI_REQ_DONE:
348 err = -bt_to_errno(hdev->req_result);
349 break;
351 case HCI_REQ_CANCELED:
352 err = -hdev->req_result;
353 break;
355 default:
356 err = -ETIMEDOUT;
357 break;
360 hdev->req_status = hdev->req_result = 0;
362 BT_DBG("%s end: err %d", hdev->name, err);
364 return err;
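/* Like __hci_req_sync(), but checks that the device is up and serializes
 * the request against other synchronous requests.
 */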
367 static int hci_req_sync(struct hci_dev *hdev,
368 void (*req)(struct hci_request *req,
369 unsigned long opt),
370 unsigned long opt, __u32 timeout)
372 int ret;
374 if (!test_bit(HCI_UP, &hdev->flags))
375 return -ENETDOWN;
377 /* Serialize all requests */
378 hci_req_lock(hdev);
379 ret = __hci_req_sync(hdev, req, opt, timeout);
380 hci_req_unlock(hdev);
382 return ret;
385 static void hci_reset_req(struct hci_request *req, unsigned long opt)
387 BT_DBG("%s %ld", req->hdev->name, opt);
389 /* Reset device */
390 set_bit(HCI_RESET, &req->hdev->flags);
391 hci_req_add(req, HCI_OP_RESET, 0, NULL);
394 static void bredr_init(struct hci_request *req)
396 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
398 /* Read Local Supported Features */
399 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
401 /* Read Local Version */
402 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
404 /* Read BD Address */
405 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
408 static void amp_init1(struct hci_request *req)
410 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
412 /* Read Local Version */
413 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
415 /* Read Local Supported Commands */
416 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
418 /* Read Local AMP Info */
419 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
421 /* Read Data Blk size */
422 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
424 /* Read Flow Control Mode */
425 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
427 /* Read Location Data */
428 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
431 static void amp_init2(struct hci_request *req)
433 /* Read Local Supported Features. Not all AMP controllers
434 * support this so it's placed conditionally in the second
435 * stage init.
437 if (req->hdev->commands[14] & 0x20)
438 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
441 static void hci_init1_req(struct hci_request *req, unsigned long opt)
443 struct hci_dev *hdev = req->hdev;
445 BT_DBG("%s %ld", hdev->name, opt);
447 /* Reset */
448 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
449 hci_reset_req(req, 0);
451 switch (hdev->dev_type) {
452 case HCI_BREDR:
453 bredr_init(req);
454 break;
456 case HCI_AMP:
457 amp_init1(req);
458 break;
460 default:
461 BT_ERR("Unknown device type %d", hdev->dev_type);
462 break;
466 static void bredr_setup(struct hci_request *req)
468 __le16 param;
469 __u8 flt_type;
471 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
472 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
474 /* Read Class of Device */
475 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
477 /* Read Local Name */
478 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
480 /* Read Voice Setting */
481 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
483 /* Read Number of Supported IAC */
484 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
486 /* Read Current IAC LAP */
487 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
489 /* Clear Event Filters */
490 flt_type = HCI_FLT_CLEAR_ALL;
491 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
493 /* Connection accept timeout ~20 secs */
494 param = cpu_to_le16(0x7d00);
495 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
498 static void le_setup(struct hci_request *req)
500 struct hci_dev *hdev = req->hdev;
502 /* Read LE Buffer Size */
503 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
505 /* Read LE Local Supported Features */
506 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
508 /* Read LE Supported States */
509 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
511 /* LE-only controllers have LE implicitly enabled */
512 if (!lmp_bredr_capable(hdev))
513 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
516 static void hci_setup_event_mask(struct hci_request *req)
518 struct hci_dev *hdev = req->hdev;
520 /* The second byte is 0xff instead of 0x9f (two reserved bits
521 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
522 * command otherwise.
524 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
 * any event mask for pre-1.2 devices.
 */
529 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
530 return;
532 if (lmp_bredr_capable(hdev)) {
533 events[4] |= 0x01; /* Flow Specification Complete */
534 events[4] |= 0x02; /* Inquiry Result with RSSI */
535 events[4] |= 0x04; /* Read Remote Extended Features Complete */
536 events[5] |= 0x08; /* Synchronous Connection Complete */
537 events[5] |= 0x10; /* Synchronous Connection Changed */
538 } else {
539 /* Use a different default for LE-only devices */
540 memset(events, 0, sizeof(events));
541 events[0] |= 0x10; /* Disconnection Complete */
542 events[1] |= 0x08; /* Read Remote Version Information Complete */
543 events[1] |= 0x20; /* Command Complete */
544 events[1] |= 0x40; /* Command Status */
545 events[1] |= 0x80; /* Hardware Error */
546 events[2] |= 0x04; /* Number of Completed Packets */
547 events[3] |= 0x02; /* Data Buffer Overflow */
549 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
550 events[0] |= 0x80; /* Encryption Change */
551 events[5] |= 0x80; /* Encryption Key Refresh Complete */
555 if (lmp_inq_rssi_capable(hdev))
556 events[4] |= 0x02; /* Inquiry Result with RSSI */
558 if (lmp_sniffsubr_capable(hdev))
559 events[5] |= 0x20; /* Sniff Subrating */
561 if (lmp_pause_enc_capable(hdev))
562 events[5] |= 0x80; /* Encryption Key Refresh Complete */
564 if (lmp_ext_inq_capable(hdev))
565 events[5] |= 0x40; /* Extended Inquiry Result */
567 if (lmp_no_flush_capable(hdev))
568 events[7] |= 0x01; /* Enhanced Flush Complete */
570 if (lmp_lsto_capable(hdev))
571 events[6] |= 0x80; /* Link Supervision Timeout Changed */
573 if (lmp_ssp_capable(hdev)) {
574 events[6] |= 0x01; /* IO Capability Request */
575 events[6] |= 0x02; /* IO Capability Response */
576 events[6] |= 0x04; /* User Confirmation Request */
577 events[6] |= 0x08; /* User Passkey Request */
578 events[6] |= 0x10; /* Remote OOB Data Request */
579 events[6] |= 0x20; /* Simple Pairing Complete */
580 events[7] |= 0x04; /* User Passkey Notification */
581 events[7] |= 0x08; /* Keypress Notification */
582 events[7] |= 0x10; /* Remote Host Supported
583 * Features Notification
587 if (lmp_le_capable(hdev))
588 events[7] |= 0x20; /* LE Meta-Event */
590 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
593 static void hci_init2_req(struct hci_request *req, unsigned long opt)
595 struct hci_dev *hdev = req->hdev;
597 if (hdev->dev_type == HCI_AMP)
598 return amp_init2(req);
600 if (lmp_bredr_capable(hdev))
601 bredr_setup(req);
602 else
603 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
605 if (lmp_le_capable(hdev))
606 le_setup(req);
608 /* All Bluetooth 1.2 and later controllers should support the
609 * HCI command for reading the local supported commands.
611 * Unfortunately some controllers indicate Bluetooth 1.2 support,
612 * but do not have support for this command. If that is the case,
613 * the driver can quirk the behavior and skip reading the local
614 * supported commands.
616 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
617 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
618 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
620 if (lmp_ssp_capable(hdev)) {
/* When SSP is available, the host features page
 * should be available as well. However, some
 * controllers list the max_page as 0 as long as SSP
 * has not been enabled. To achieve proper debugging
 * output, force the max_page to at least 1.
 */
627 hdev->max_page = 0x01;
629 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
630 u8 mode = 0x01;
632 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
633 sizeof(mode), &mode);
634 } else {
635 struct hci_cp_write_eir cp;
637 memset(hdev->eir, 0, sizeof(hdev->eir));
638 memset(&cp, 0, sizeof(cp));
640 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
644 if (lmp_inq_rssi_capable(hdev) ||
645 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
646 u8 mode;
648 /* If Extended Inquiry Result events are supported, then
649 * they are clearly preferred over Inquiry Result with RSSI
650 * events.
652 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
654 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
657 if (lmp_inq_tx_pwr_capable(hdev))
658 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
660 if (lmp_ext_feat_capable(hdev)) {
661 struct hci_cp_read_local_ext_features cp;
663 cp.page = 0x01;
664 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
665 sizeof(cp), &cp);
668 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
669 u8 enable = 1;
670 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
671 &enable);
675 static void hci_setup_link_policy(struct hci_request *req)
677 struct hci_dev *hdev = req->hdev;
678 struct hci_cp_write_def_link_policy cp;
679 u16 link_policy = 0;
681 if (lmp_rswitch_capable(hdev))
682 link_policy |= HCI_LP_RSWITCH;
683 if (lmp_hold_capable(hdev))
684 link_policy |= HCI_LP_HOLD;
685 if (lmp_sniff_capable(hdev))
686 link_policy |= HCI_LP_SNIFF;
687 if (lmp_park_capable(hdev))
688 link_policy |= HCI_LP_PARK;
690 cp.policy = cpu_to_le16(link_policy);
691 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
694 static void hci_set_le_support(struct hci_request *req)
696 struct hci_dev *hdev = req->hdev;
697 struct hci_cp_write_le_host_supported cp;
699 /* LE-only devices do not support explicit enablement */
700 if (!lmp_bredr_capable(hdev))
701 return;
703 memset(&cp, 0, sizeof(cp));
705 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
706 cp.le = 0x01;
707 cp.simul = 0x00;
710 if (cp.le != lmp_host_le_capable(hdev))
711 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
712 &cp);
715 static void hci_set_event_mask_page_2(struct hci_request *req)
717 struct hci_dev *hdev = req->hdev;
718 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
719 bool changed = false;
721 /* If Connectionless Slave Broadcast master role is supported
722 * enable all necessary events for it.
724 if (lmp_csb_master_capable(hdev)) {
725 events[1] |= 0x40; /* Triggered Clock Capture */
726 events[1] |= 0x80; /* Synchronization Train Complete */
727 events[2] |= 0x10; /* Slave Page Response Timeout */
728 events[2] |= 0x20; /* CSB Channel Map Change */
729 changed = true;
732 /* If Connectionless Slave Broadcast slave role is supported
733 * enable all necessary events for it.
735 if (lmp_csb_slave_capable(hdev)) {
736 events[2] |= 0x01; /* Synchronization Train Received */
737 events[2] |= 0x02; /* CSB Receive */
738 events[2] |= 0x04; /* CSB Timeout */
739 events[2] |= 0x08; /* Truncated Page Complete */
740 changed = true;
743 /* Enable Authenticated Payload Timeout Expired event if supported */
744 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
745 events[2] |= 0x80;
746 changed = true;
749 /* Some Broadcom based controllers indicate support for Set Event
750 * Mask Page 2 command, but then actually do not support it. Since
751 * the default value is all bits set to zero, the command is only
752 * required if the event mask has to be changed. In case no change
753 * to the event mask is needed, skip this command.
755 if (changed)
756 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
757 sizeof(events), events);
760 static void hci_init3_req(struct hci_request *req, unsigned long opt)
762 struct hci_dev *hdev = req->hdev;
763 u8 p;
765 hci_setup_event_mask(req);
767 if (hdev->commands[6] & 0x20 &&
768 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
769 struct hci_cp_read_stored_link_key cp;
771 bacpy(&cp.bdaddr, BDADDR_ANY);
772 cp.read_all = 0x01;
773 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
776 if (hdev->commands[5] & 0x10)
777 hci_setup_link_policy(req);
779 if (hdev->commands[8] & 0x01)
780 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
782 /* Some older Broadcom based Bluetooth 1.2 controllers do not
783 * support the Read Page Scan Type command. Check support for
784 * this command in the bit mask of supported commands.
786 if (hdev->commands[13] & 0x01)
787 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
789 if (lmp_le_capable(hdev)) {
790 u8 events[8];
792 memset(events, 0, sizeof(events));
793 events[0] = 0x0f;
795 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
796 events[0] |= 0x10; /* LE Long Term Key Request */
798 /* If controller supports the Connection Parameters Request
799 * Link Layer Procedure, enable the corresponding event.
801 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
802 events[0] |= 0x20; /* LE Remote Connection
803 * Parameter Request
806 /* If the controller supports the Data Length Extension
807 * feature, enable the corresponding event.
809 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
810 events[0] |= 0x40; /* LE Data Length Change */
/* If the controller supports Extended Scanner Filter
 * Policies, enable the corresponding event.
 */
815 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
816 events[1] |= 0x04; /* LE Direct Advertising
817 * Report
820 /* If the controller supports the LE Read Local P-256
821 * Public Key command, enable the corresponding event.
823 if (hdev->commands[34] & 0x02)
824 events[0] |= 0x80; /* LE Read Local P-256
825 * Public Key Complete
828 /* If the controller supports the LE Generate DHKey
829 * command, enable the corresponding event.
831 if (hdev->commands[34] & 0x04)
832 events[1] |= 0x01; /* LE Generate DHKey Complete */
834 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
835 events);
837 if (hdev->commands[25] & 0x40) {
838 /* Read LE Advertising Channel TX Power */
839 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
842 if (hdev->commands[26] & 0x40) {
843 /* Read LE White List Size */
844 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
845 0, NULL);
848 if (hdev->commands[26] & 0x80) {
849 /* Clear LE White List */
850 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
853 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
854 /* Read LE Maximum Data Length */
855 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
857 /* Read LE Suggested Default Data Length */
858 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
861 hci_set_le_support(req);
864 /* Read features beyond page 1 if available */
865 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
866 struct hci_cp_read_local_ext_features cp;
868 cp.page = p;
869 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
870 sizeof(cp), &cp);
874 static void hci_init4_req(struct hci_request *req, unsigned long opt)
876 struct hci_dev *hdev = req->hdev;
/* Some Broadcom based Bluetooth controllers do not support the
 * Delete Stored Link Key command. They are clearly indicating its
 * absence in the bit mask of supported commands.
 *
 * Check the supported commands and only if the command is marked
 * as supported send it. If not supported, assume that the controller
 * does not have actual support for stored link keys, which makes this
 * command redundant anyway.
 *
 * Some controllers indicate that they support handling deleting
 * stored link keys, but they don't. The quirk lets a driver
 * just disable this command.
 */
891 if (hdev->commands[6] & 0x80 &&
892 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
893 struct hci_cp_delete_stored_link_key cp;
895 bacpy(&cp.bdaddr, BDADDR_ANY);
896 cp.delete_all = 0x01;
897 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
898 sizeof(cp), &cp);
901 /* Set event mask page 2 if the HCI command for it is supported */
902 if (hdev->commands[22] & 0x04)
903 hci_set_event_mask_page_2(req);
905 /* Read local codec list if the HCI command is supported */
906 if (hdev->commands[29] & 0x20)
907 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
909 /* Get MWS transport configuration if the HCI command is supported */
910 if (hdev->commands[30] & 0x08)
911 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
913 /* Check for Synchronization Train support */
914 if (lmp_sync_train_capable(hdev))
915 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
917 /* Enable Secure Connections if supported and configured */
918 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
919 bredr_sc_enabled(hdev)) {
920 u8 support = 0x01;
922 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
923 sizeof(support), &support);
927 static int __hci_init(struct hci_dev *hdev)
929 int err;
931 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
932 if (err < 0)
933 return err;
935 if (hci_dev_test_flag(hdev, HCI_SETUP))
936 hci_debugfs_create_basic(hdev);
938 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
939 if (err < 0)
940 return err;
/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
 * dual-mode BR/EDR/LE controllers. AMP controllers only need
 * the first two stages of init.
 */
946 if (hdev->dev_type != HCI_BREDR)
947 return 0;
949 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
950 if (err < 0)
951 return err;
953 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
954 if (err < 0)
955 return err;
957 /* This function is only called when the controller is actually in
958 * configured state. When the controller is marked as unconfigured,
959 * this initialization procedure is not run.
961 * It means that it is possible that a controller runs through its
962 * setup phase and then discovers missing settings. If that is the
963 * case, then this function will not be called. It then will only
964 * be called during the config phase.
966 * So only when in setup phase or config phase, create the debugfs
967 * entries and register the SMP channels.
969 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
970 !hci_dev_test_flag(hdev, HCI_CONFIG))
971 return 0;
973 hci_debugfs_create_common(hdev);
975 if (lmp_bredr_capable(hdev))
976 hci_debugfs_create_bredr(hdev);
978 if (lmp_le_capable(hdev))
979 hci_debugfs_create_le(hdev);
981 return 0;
984 static void hci_init0_req(struct hci_request *req, unsigned long opt)
986 struct hci_dev *hdev = req->hdev;
988 BT_DBG("%s %ld", hdev->name, opt);
990 /* Reset */
991 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
992 hci_reset_req(req, 0);
994 /* Read Local Version */
995 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
997 /* Read BD Address */
998 if (hdev->set_bdaddr)
999 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1002 static int __hci_unconf_init(struct hci_dev *hdev)
1004 int err;
1006 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1007 return 0;
1009 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1010 if (err < 0)
1011 return err;
1013 if (hci_dev_test_flag(hdev, HCI_SETUP))
1014 hci_debugfs_create_basic(hdev);
1016 return 0;
1019 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1021 __u8 scan = opt;
1023 BT_DBG("%s %x", req->hdev->name, scan);
1025 /* Inquiry and Page scans */
1026 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1029 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1031 __u8 auth = opt;
1033 BT_DBG("%s %x", req->hdev->name, auth);
1035 /* Authentication */
1036 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1039 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1041 __u8 encrypt = opt;
1043 BT_DBG("%s %x", req->hdev->name, encrypt);
1045 /* Encryption */
1046 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1049 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1051 __le16 policy = cpu_to_le16(opt);
1053 BT_DBG("%s %x", req->hdev->name, policy);
1055 /* Default link policy */
1056 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1059 /* Get HCI device by index.
1060 * Device is held on return. */
1061 struct hci_dev *hci_dev_get(int index)
1063 struct hci_dev *hdev = NULL, *d;
1065 BT_DBG("%d", index);
1067 if (index < 0)
1068 return NULL;
1070 read_lock(&hci_dev_list_lock);
1071 list_for_each_entry(d, &hci_dev_list, list) {
1072 if (d->id == index) {
1073 hdev = hci_dev_hold(d);
1074 break;
1077 read_unlock(&hci_dev_list_lock);
1078 return hdev;
1081 /* ---- Inquiry support ---- */
1083 bool hci_discovery_active(struct hci_dev *hdev)
1085 struct discovery_state *discov = &hdev->discovery;
1087 switch (discov->state) {
1088 case DISCOVERY_FINDING:
1089 case DISCOVERY_RESOLVING:
1090 return true;
1092 default:
1093 return false;
1097 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1099 int old_state = hdev->discovery.state;
1101 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1103 if (old_state == state)
1104 return;
1106 hdev->discovery.state = state;
1108 switch (state) {
1109 case DISCOVERY_STOPPED:
1110 hci_update_background_scan(hdev);
1112 if (old_state != DISCOVERY_STARTING)
1113 mgmt_discovering(hdev, 0);
1114 break;
1115 case DISCOVERY_STARTING:
1116 break;
1117 case DISCOVERY_FINDING:
1118 mgmt_discovering(hdev, 1);
1119 break;
1120 case DISCOVERY_RESOLVING:
1121 break;
1122 case DISCOVERY_STOPPING:
1123 break;
1127 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1129 struct discovery_state *cache = &hdev->discovery;
1130 struct inquiry_entry *p, *n;
1132 list_for_each_entry_safe(p, n, &cache->all, all) {
1133 list_del(&p->all);
1134 kfree(p);
1137 INIT_LIST_HEAD(&cache->unknown);
1138 INIT_LIST_HEAD(&cache->resolve);
1141 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1142 bdaddr_t *bdaddr)
1144 struct discovery_state *cache = &hdev->discovery;
1145 struct inquiry_entry *e;
1147 BT_DBG("cache %p, %pMR", cache, bdaddr);
1149 list_for_each_entry(e, &cache->all, all) {
1150 if (!bacmp(&e->data.bdaddr, bdaddr))
1151 return e;
1154 return NULL;
1157 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1158 bdaddr_t *bdaddr)
1160 struct discovery_state *cache = &hdev->discovery;
1161 struct inquiry_entry *e;
1163 BT_DBG("cache %p, %pMR", cache, bdaddr);
1165 list_for_each_entry(e, &cache->unknown, list) {
1166 if (!bacmp(&e->data.bdaddr, bdaddr))
1167 return e;
1170 return NULL;
1173 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1174 bdaddr_t *bdaddr,
1175 int state)
1177 struct discovery_state *cache = &hdev->discovery;
1178 struct inquiry_entry *e;
1180 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1182 list_for_each_entry(e, &cache->resolve, list) {
1183 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1184 return e;
1185 if (!bacmp(&e->data.bdaddr, bdaddr))
1186 return e;
1189 return NULL;
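/* Re-insert the entry at its position in the resolve list, which is kept
 * ordered by RSSI so that name resolution is attempted for the strongest
 * (closest) devices first.
 */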
1192 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1193 struct inquiry_entry *ie)
1195 struct discovery_state *cache = &hdev->discovery;
1196 struct list_head *pos = &cache->resolve;
1197 struct inquiry_entry *p;
1199 list_del(&ie->list);
1201 list_for_each_entry(p, &cache->resolve, list) {
1202 if (p->name_state != NAME_PENDING &&
1203 abs(p->data.rssi) >= abs(ie->data.rssi))
1204 break;
1205 pos = &p->list;
1208 list_add(&ie->list, pos);
1211 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1212 bool name_known)
1214 struct discovery_state *cache = &hdev->discovery;
1215 struct inquiry_entry *ie;
1216 u32 flags = 0;
1218 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1220 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1222 if (!data->ssp_mode)
1223 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1225 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1226 if (ie) {
1227 if (!ie->data.ssp_mode)
1228 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1230 if (ie->name_state == NAME_NEEDED &&
1231 data->rssi != ie->data.rssi) {
1232 ie->data.rssi = data->rssi;
1233 hci_inquiry_cache_update_resolve(hdev, ie);
1236 goto update;
1239 /* Entry not in the cache. Add new one. */
1240 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1241 if (!ie) {
1242 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1243 goto done;
1246 list_add(&ie->all, &cache->all);
1248 if (name_known) {
1249 ie->name_state = NAME_KNOWN;
1250 } else {
1251 ie->name_state = NAME_NOT_KNOWN;
1252 list_add(&ie->list, &cache->unknown);
1255 update:
1256 if (name_known && ie->name_state != NAME_KNOWN &&
1257 ie->name_state != NAME_PENDING) {
1258 ie->name_state = NAME_KNOWN;
1259 list_del(&ie->list);
1262 memcpy(&ie->data, data, sizeof(*data));
1263 ie->timestamp = jiffies;
1264 cache->timestamp = jiffies;
1266 if (ie->name_state == NAME_NOT_KNOWN)
1267 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1269 done:
1270 return flags;
1273 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1275 struct discovery_state *cache = &hdev->discovery;
1276 struct inquiry_info *info = (struct inquiry_info *) buf;
1277 struct inquiry_entry *e;
1278 int copied = 0;
1280 list_for_each_entry(e, &cache->all, all) {
1281 struct inquiry_data *data = &e->data;
1283 if (copied >= num)
1284 break;
1286 bacpy(&info->bdaddr, &data->bdaddr);
1287 info->pscan_rep_mode = data->pscan_rep_mode;
1288 info->pscan_period_mode = data->pscan_period_mode;
1289 info->pscan_mode = data->pscan_mode;
1290 memcpy(info->dev_class, data->dev_class, 3);
1291 info->clock_offset = data->clock_offset;
1293 info++;
1294 copied++;
1297 BT_DBG("cache %p, copied %d", cache, copied);
1298 return copied;
1301 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1303 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1304 struct hci_dev *hdev = req->hdev;
1305 struct hci_cp_inquiry cp;
1307 BT_DBG("%s", hdev->name);
1309 if (test_bit(HCI_INQUIRY, &hdev->flags))
1310 return;
1312 /* Start Inquiry */
1313 memcpy(&cp.lap, &ir->lap, 3);
1314 cp.length = ir->length;
1315 cp.num_rsp = ir->num_rsp;
1316 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
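/* Inquiry ioctl helper: start a new inquiry if the cache is stale, empty or
 * a flush was requested, wait for it to finish and copy the cached results
 * back to user space.
 */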
1319 int hci_inquiry(void __user *arg)
1321 __u8 __user *ptr = arg;
1322 struct hci_inquiry_req ir;
1323 struct hci_dev *hdev;
1324 int err = 0, do_inquiry = 0, max_rsp;
1325 long timeo;
1326 __u8 *buf;
1328 if (copy_from_user(&ir, ptr, sizeof(ir)))
1329 return -EFAULT;
1331 hdev = hci_dev_get(ir.dev_id);
1332 if (!hdev)
1333 return -ENODEV;
1335 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1336 err = -EBUSY;
1337 goto done;
1340 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1341 err = -EOPNOTSUPP;
1342 goto done;
1345 if (hdev->dev_type != HCI_BREDR) {
1346 err = -EOPNOTSUPP;
1347 goto done;
1350 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1351 err = -EOPNOTSUPP;
1352 goto done;
1355 hci_dev_lock(hdev);
1356 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1357 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1358 hci_inquiry_cache_flush(hdev);
1359 do_inquiry = 1;
1361 hci_dev_unlock(hdev);
1363 timeo = ir.length * msecs_to_jiffies(2000);
1365 if (do_inquiry) {
1366 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1367 timeo);
1368 if (err < 0)
1369 goto done;
1371 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1372 * cleared). If it is interrupted by a signal, return -EINTR.
1374 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1375 TASK_INTERRUPTIBLE))
1376 return -EINTR;
/* For an unlimited number of responses, use a buffer with
 * 255 entries.
 */
1382 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep, so allocate a temporary buffer and then
 * copy it to user space.
 */
1387 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1388 if (!buf) {
1389 err = -ENOMEM;
1390 goto done;
1393 hci_dev_lock(hdev);
1394 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1395 hci_dev_unlock(hdev);
1397 BT_DBG("num_rsp %d", ir.num_rsp);
1399 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1400 ptr += sizeof(ir);
1401 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1402 ir.num_rsp))
1403 err = -EFAULT;
1404 } else
1405 err = -EFAULT;
1407 kfree(buf);
1409 done:
1410 hci_dev_put(hdev);
1411 return err;
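/* Bring the device up: run the driver's open() and setup() callbacks,
 * perform the HCI init (or unconfigured init) sequence and, on success,
 * mark the device as HCI_UP and notify the management interface.
 */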
1414 static int hci_dev_do_open(struct hci_dev *hdev)
1416 int ret = 0;
1418 BT_DBG("%s %p", hdev->name, hdev);
1420 hci_req_lock(hdev);
1422 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1423 ret = -ENODEV;
1424 goto done;
1427 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1428 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1429 /* Check for rfkill but allow the HCI setup stage to
1430 * proceed (which in itself doesn't cause any RF activity).
1432 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1433 ret = -ERFKILL;
1434 goto done;
/* Check for a valid public address or a configured static
 * random address, but let the HCI setup proceed to
 * be able to determine if there is a public address
 * or not.
 *
 * In case of user channel usage, it is not important
 * if a public address or static random address is
 * available.
 *
 * This check is only valid for BR/EDR controllers
 * since AMP controllers do not have an address.
 */
1449 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1450 hdev->dev_type == HCI_BREDR &&
1451 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1452 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1453 ret = -EADDRNOTAVAIL;
1454 goto done;
1458 if (test_bit(HCI_UP, &hdev->flags)) {
1459 ret = -EALREADY;
1460 goto done;
1463 if (hdev->open(hdev)) {
1464 ret = -EIO;
1465 goto done;
1468 set_bit(HCI_RUNNING, &hdev->flags);
1469 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1471 atomic_set(&hdev->cmd_cnt, 1);
1472 set_bit(HCI_INIT, &hdev->flags);
1474 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1475 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1477 if (hdev->setup)
1478 ret = hdev->setup(hdev);
1480 /* The transport driver can set these quirks before
1481 * creating the HCI device or in its setup callback.
1483 * In case any of them is set, the controller has to
1484 * start up as unconfigured.
1486 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1487 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1488 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1490 /* For an unconfigured controller it is required to
1491 * read at least the version information provided by
1492 * the Read Local Version Information command.
1494 * If the set_bdaddr driver callback is provided, then
1495 * also the original Bluetooth public device address
1496 * will be read using the Read BD Address command.
1498 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1499 ret = __hci_unconf_init(hdev);
1502 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1503 /* If public address change is configured, ensure that
1504 * the address gets programmed. If the driver does not
1505 * support changing the public address, fail the power
1506 * on procedure.
1508 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1509 hdev->set_bdaddr)
1510 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1511 else
1512 ret = -EADDRNOTAVAIL;
1515 if (!ret) {
1516 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1517 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1518 ret = __hci_init(hdev);
1519 if (!ret && hdev->post_init)
1520 ret = hdev->post_init(hdev);
/* If the HCI Reset command is clearing all diagnostic settings,
 * then they need to be reprogrammed after the init procedure
 * has completed.
 */
1528 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1529 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1530 ret = hdev->set_diag(hdev, true);
1532 clear_bit(HCI_INIT, &hdev->flags);
1534 if (!ret) {
1535 hci_dev_hold(hdev);
1536 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1537 set_bit(HCI_UP, &hdev->flags);
1538 hci_sock_dev_event(hdev, HCI_DEV_UP);
1539 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1540 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1541 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1542 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1543 hdev->dev_type == HCI_BREDR) {
1544 hci_dev_lock(hdev);
1545 mgmt_powered(hdev, 1);
1546 hci_dev_unlock(hdev);
1548 } else {
1549 /* Init failed, cleanup */
1550 flush_work(&hdev->tx_work);
1551 flush_work(&hdev->cmd_work);
1552 flush_work(&hdev->rx_work);
1554 skb_queue_purge(&hdev->cmd_q);
1555 skb_queue_purge(&hdev->rx_q);
1557 if (hdev->flush)
1558 hdev->flush(hdev);
1560 if (hdev->sent_cmd) {
1561 kfree_skb(hdev->sent_cmd);
1562 hdev->sent_cmd = NULL;
1565 clear_bit(HCI_RUNNING, &hdev->flags);
1566 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1568 hdev->close(hdev);
1569 hdev->flags &= BIT(HCI_RAW);
1572 done:
1573 hci_req_unlock(hdev);
1574 return ret;
1577 /* ---- HCI ioctl helpers ---- */
1579 int hci_dev_open(__u16 dev)
1581 struct hci_dev *hdev;
1582 int err;
1584 hdev = hci_dev_get(dev);
1585 if (!hdev)
1586 return -ENODEV;
/* Devices that are marked as unconfigured can only be powered
 * up as user channel. Trying to bring them up as normal devices
 * will result in a failure. Only user channel operation is
 * possible.
 *
 * When this function is called for a user channel, the flag
 * HCI_USER_CHANNEL will be set first before attempting to
 * open the device.
 */
1597 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1598 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1599 err = -EOPNOTSUPP;
1600 goto done;
1603 /* We need to ensure that no other power on/off work is pending
1604 * before proceeding to call hci_dev_do_open. This is
1605 * particularly important if the setup procedure has not yet
1606 * completed.
1608 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1609 cancel_delayed_work(&hdev->power_off);
1611 /* After this call it is guaranteed that the setup procedure
1612 * has finished. This means that error conditions like RFKILL
1613 * or no valid public or static random address apply.
1615 flush_workqueue(hdev->req_workqueue);
1617 /* For controllers not using the management interface and that
1618 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1619 * so that pairing works for them. Once the management interface
1620 * is in use this bit will be cleared again and userspace has
1621 * to explicitly enable it.
1623 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1624 !hci_dev_test_flag(hdev, HCI_MGMT))
1625 hci_dev_set_flag(hdev, HCI_BONDABLE);
1627 err = hci_dev_do_open(hdev);
1629 done:
1630 hci_dev_put(hdev);
1631 return err;
1634 /* This function requires the caller holds hdev->lock */
1635 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1637 struct hci_conn_params *p;
1639 list_for_each_entry(p, &hdev->le_conn_params, list) {
1640 if (p->conn) {
1641 hci_conn_drop(p->conn);
1642 hci_conn_put(p->conn);
1643 p->conn = NULL;
1645 list_del_init(&p->action);
1648 BT_DBG("All LE pending actions cleared");
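/* Bring the device down: cancel pending work and timers, flush queues and
 * connections, optionally reset the controller (HCI_QUIRK_RESET_ON_CLOSE)
 * and finally call the driver's close() callback.
 */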
1651 int hci_dev_do_close(struct hci_dev *hdev)
1653 bool auto_off;
1655 BT_DBG("%s %p", hdev->name, hdev);
1657 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1658 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1659 test_bit(HCI_UP, &hdev->flags)) {
1660 /* Execute vendor specific shutdown routine */
1661 if (hdev->shutdown)
1662 hdev->shutdown(hdev);
1665 cancel_delayed_work(&hdev->power_off);
1667 hci_req_cancel(hdev, ENODEV);
1668 hci_req_lock(hdev);
1670 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1671 cancel_delayed_work_sync(&hdev->cmd_timer);
1672 hci_req_unlock(hdev);
1673 return 0;
1676 /* Flush RX and TX works */
1677 flush_work(&hdev->tx_work);
1678 flush_work(&hdev->rx_work);
1680 if (hdev->discov_timeout > 0) {
1681 cancel_delayed_work(&hdev->discov_off);
1682 hdev->discov_timeout = 0;
1683 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1684 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1687 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1688 cancel_delayed_work(&hdev->service_cache);
1690 cancel_delayed_work_sync(&hdev->le_scan_disable);
1691 cancel_delayed_work_sync(&hdev->le_scan_restart);
1693 if (hci_dev_test_flag(hdev, HCI_MGMT))
1694 cancel_delayed_work_sync(&hdev->rpa_expired);
1696 if (hdev->adv_instance_timeout) {
1697 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1698 hdev->adv_instance_timeout = 0;
1701 /* Avoid potential lockdep warnings from the *_flush() calls by
1702 * ensuring the workqueue is empty up front.
1704 drain_workqueue(hdev->workqueue);
1706 hci_dev_lock(hdev);
1708 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1710 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1712 if (!auto_off && hdev->dev_type == HCI_BREDR)
1713 mgmt_powered(hdev, 0);
1715 hci_inquiry_cache_flush(hdev);
1716 hci_pend_le_actions_clear(hdev);
1717 hci_conn_hash_flush(hdev);
1718 hci_dev_unlock(hdev);
1720 smp_unregister(hdev);
1722 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1724 if (hdev->flush)
1725 hdev->flush(hdev);
1727 /* Reset device */
1728 skb_queue_purge(&hdev->cmd_q);
1729 atomic_set(&hdev->cmd_cnt, 1);
1730 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1731 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1732 set_bit(HCI_INIT, &hdev->flags);
1733 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1734 clear_bit(HCI_INIT, &hdev->flags);
1737 /* flush cmd work */
1738 flush_work(&hdev->cmd_work);
1740 /* Drop queues */
1741 skb_queue_purge(&hdev->rx_q);
1742 skb_queue_purge(&hdev->cmd_q);
1743 skb_queue_purge(&hdev->raw_q);
1745 /* Drop last sent command */
1746 if (hdev->sent_cmd) {
1747 cancel_delayed_work_sync(&hdev->cmd_timer);
1748 kfree_skb(hdev->sent_cmd);
1749 hdev->sent_cmd = NULL;
1752 clear_bit(HCI_RUNNING, &hdev->flags);
1753 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1755 /* After this point our queues are empty
1756 * and no tasks are scheduled. */
1757 hdev->close(hdev);
1759 /* Clear flags */
1760 hdev->flags &= BIT(HCI_RAW);
1761 hci_dev_clear_volatile_flags(hdev);
1763 /* Controller radio is available but is currently powered down */
1764 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1766 memset(hdev->eir, 0, sizeof(hdev->eir));
1767 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1768 bacpy(&hdev->random_addr, BDADDR_ANY);
1770 hci_req_unlock(hdev);
1772 hci_dev_put(hdev);
1773 return 0;
1776 int hci_dev_close(__u16 dev)
1778 struct hci_dev *hdev;
1779 int err;
1781 hdev = hci_dev_get(dev);
1782 if (!hdev)
1783 return -ENODEV;
1785 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1786 err = -EBUSY;
1787 goto done;
1790 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1791 cancel_delayed_work(&hdev->power_off);
1793 err = hci_dev_do_close(hdev);
1795 done:
1796 hci_dev_put(hdev);
1797 return err;
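/* Soft-reset the device: drop the queued packets, flush the inquiry cache
 * and the connection hash, and issue an HCI Reset without taking the
 * interface down.
 */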
1800 static int hci_dev_do_reset(struct hci_dev *hdev)
1802 int ret;
1804 BT_DBG("%s %p", hdev->name, hdev);
1806 hci_req_lock(hdev);
1808 /* Drop queues */
1809 skb_queue_purge(&hdev->rx_q);
1810 skb_queue_purge(&hdev->cmd_q);
1812 /* Avoid potential lockdep warnings from the *_flush() calls by
1813 * ensuring the workqueue is empty up front.
1815 drain_workqueue(hdev->workqueue);
1817 hci_dev_lock(hdev);
1818 hci_inquiry_cache_flush(hdev);
1819 hci_conn_hash_flush(hdev);
1820 hci_dev_unlock(hdev);
1822 if (hdev->flush)
1823 hdev->flush(hdev);
1825 atomic_set(&hdev->cmd_cnt, 1);
1826 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1828 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1830 hci_req_unlock(hdev);
1831 return ret;
1834 int hci_dev_reset(__u16 dev)
1836 struct hci_dev *hdev;
1837 int err;
1839 hdev = hci_dev_get(dev);
1840 if (!hdev)
1841 return -ENODEV;
1843 if (!test_bit(HCI_UP, &hdev->flags)) {
1844 err = -ENETDOWN;
1845 goto done;
1848 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1849 err = -EBUSY;
1850 goto done;
1853 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1854 err = -EOPNOTSUPP;
1855 goto done;
1858 err = hci_dev_do_reset(hdev);
1860 done:
1861 hci_dev_put(hdev);
1862 return err;
1865 int hci_dev_reset_stat(__u16 dev)
1867 struct hci_dev *hdev;
1868 int ret = 0;
1870 hdev = hci_dev_get(dev);
1871 if (!hdev)
1872 return -ENODEV;
1874 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1875 ret = -EBUSY;
1876 goto done;
1879 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1880 ret = -EOPNOTSUPP;
1881 goto done;
1884 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1886 done:
1887 hci_dev_put(hdev);
1888 return ret;
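/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync after a scan
 * mode change that was made through the legacy ioctl interface instead of
 * through the management interface.
 */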
1891 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1893 bool conn_changed, discov_changed;
1895 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1897 if ((scan & SCAN_PAGE))
1898 conn_changed = !hci_dev_test_and_set_flag(hdev,
1899 HCI_CONNECTABLE);
1900 else
1901 conn_changed = hci_dev_test_and_clear_flag(hdev,
1902 HCI_CONNECTABLE);
1904 if ((scan & SCAN_INQUIRY)) {
1905 discov_changed = !hci_dev_test_and_set_flag(hdev,
1906 HCI_DISCOVERABLE);
1907 } else {
1908 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1909 discov_changed = hci_dev_test_and_clear_flag(hdev,
1910 HCI_DISCOVERABLE);
1913 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1914 return;
1916 if (conn_changed || discov_changed) {
1917 /* In case this was disabled through mgmt */
1918 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1920 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1921 mgmt_update_adv_data(hdev);
1923 mgmt_new_settings(hdev);
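/* Handle the legacy device ioctls (HCISETAUTH, HCISETENCRYPT, HCISETSCAN,
 * HCISETLINKPOL, ...) that tweak controller settings without going through
 * the management interface.
 */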
1927 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1929 struct hci_dev *hdev;
1930 struct hci_dev_req dr;
1931 int err = 0;
1933 if (copy_from_user(&dr, arg, sizeof(dr)))
1934 return -EFAULT;
1936 hdev = hci_dev_get(dr.dev_id);
1937 if (!hdev)
1938 return -ENODEV;
1940 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1941 err = -EBUSY;
1942 goto done;
1945 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1946 err = -EOPNOTSUPP;
1947 goto done;
1950 if (hdev->dev_type != HCI_BREDR) {
1951 err = -EOPNOTSUPP;
1952 goto done;
1955 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1956 err = -EOPNOTSUPP;
1957 goto done;
1960 switch (cmd) {
1961 case HCISETAUTH:
1962 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1963 HCI_INIT_TIMEOUT);
1964 break;
1966 case HCISETENCRYPT:
1967 if (!lmp_encrypt_capable(hdev)) {
1968 err = -EOPNOTSUPP;
1969 break;
1972 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1973 /* Auth must be enabled first */
1974 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1975 HCI_INIT_TIMEOUT);
1976 if (err)
1977 break;
1980 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1981 HCI_INIT_TIMEOUT);
1982 break;
1984 case HCISETSCAN:
1985 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1986 HCI_INIT_TIMEOUT);
1988 /* Ensure that the connectable and discoverable states
1989 * get correctly modified as this was a non-mgmt change.
1991 if (!err)
1992 hci_update_scan_state(hdev, dr.dev_opt);
1993 break;
1995 case HCISETLINKPOL:
1996 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1997 HCI_INIT_TIMEOUT);
1998 break;
2000 case HCISETLINKMODE:
2001 hdev->link_mode = ((__u16) dr.dev_opt) &
2002 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2003 break;
2005 case HCISETPTYPE:
2006 hdev->pkt_type = (__u16) dr.dev_opt;
2007 break;
2009 case HCISETACLMTU:
2010 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2011 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2012 break;
2014 case HCISETSCOMTU:
2015 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2016 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2017 break;
2019 default:
2020 err = -EINVAL;
2021 break;
2024 done:
2025 hci_dev_put(hdev);
2026 return err;
2029 int hci_get_dev_list(void __user *arg)
2031 struct hci_dev *hdev;
2032 struct hci_dev_list_req *dl;
2033 struct hci_dev_req *dr;
2034 int n = 0, size, err;
2035 __u16 dev_num;
2037 if (get_user(dev_num, (__u16 __user *) arg))
2038 return -EFAULT;
2040 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2041 return -EINVAL;
2043 size = sizeof(*dl) + dev_num * sizeof(*dr);
2045 dl = kzalloc(size, GFP_KERNEL);
2046 if (!dl)
2047 return -ENOMEM;
2049 dr = dl->dev_req;
2051 read_lock(&hci_dev_list_lock);
2052 list_for_each_entry(hdev, &hci_dev_list, list) {
2053 unsigned long flags = hdev->flags;
2055 /* When the auto-off is configured it means the transport
2056 * is running, but in that case still indicate that the
2057 * device is actually down.
2059 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2060 flags &= ~BIT(HCI_UP);
2062 (dr + n)->dev_id = hdev->id;
2063 (dr + n)->dev_opt = flags;
2065 if (++n >= dev_num)
2066 break;
2068 read_unlock(&hci_dev_list_lock);
2070 dl->dev_num = n;
2071 size = sizeof(*dl) + n * sizeof(*dr);
2073 err = copy_to_user(arg, dl, size);
2074 kfree(dl);
2076 return err ? -EFAULT : 0;
2079 int hci_get_dev_info(void __user *arg)
2081 struct hci_dev *hdev;
2082 struct hci_dev_info di;
2083 unsigned long flags;
2084 int err = 0;
2086 if (copy_from_user(&di, arg, sizeof(di)))
2087 return -EFAULT;
2089 hdev = hci_dev_get(di.dev_id);
2090 if (!hdev)
2091 return -ENODEV;
2093 /* When the auto-off is configured it means the transport
2094 * is running, but in that case still indicate that the
2095 * device is actually down.
2097 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2098 flags = hdev->flags & ~BIT(HCI_UP);
2099 else
2100 flags = hdev->flags;
2102 strcpy(di.name, hdev->name);
2103 di.bdaddr = hdev->bdaddr;
2104 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2105 di.flags = flags;
2106 di.pkt_type = hdev->pkt_type;
2107 if (lmp_bredr_capable(hdev)) {
2108 di.acl_mtu = hdev->acl_mtu;
2109 di.acl_pkts = hdev->acl_pkts;
2110 di.sco_mtu = hdev->sco_mtu;
2111 di.sco_pkts = hdev->sco_pkts;
2112 } else {
2113 di.acl_mtu = hdev->le_mtu;
2114 di.acl_pkts = hdev->le_pkts;
2115 di.sco_mtu = 0;
2116 di.sco_pkts = 0;
2118 di.link_policy = hdev->link_policy;
2119 di.link_mode = hdev->link_mode;
2121 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2122 memcpy(&di.features, &hdev->features, sizeof(di.features));
2124 if (copy_to_user(arg, &di, sizeof(di)))
2125 err = -EFAULT;
2127 hci_dev_put(hdev);
2129 return err;
2132 /* ---- Interface to HCI drivers ---- */
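/* rfkill callback: mark the device as rfkilled and close it when it gets
 * blocked, unless it is still in the setup or config stage. User channel
 * devices cannot be blocked this way.
 */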
2134 static int hci_rfkill_set_block(void *data, bool blocked)
2136 struct hci_dev *hdev = data;
2138 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2140 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2141 return -EBUSY;
2143 if (blocked) {
2144 hci_dev_set_flag(hdev, HCI_RFKILLED);
2145 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2146 !hci_dev_test_flag(hdev, HCI_CONFIG))
2147 hci_dev_do_close(hdev);
2148 } else {
2149 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2152 return 0;
2155 static const struct rfkill_ops hci_rfkill_ops = {
2156 .set_block = hci_rfkill_set_block,
2159 static void hci_power_on(struct work_struct *work)
2161 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2162 int err;
2164 BT_DBG("%s", hdev->name);
2166 err = hci_dev_do_open(hdev);
2167 if (err < 0) {
2168 hci_dev_lock(hdev);
2169 mgmt_set_powered_failed(hdev, err);
2170 hci_dev_unlock(hdev);
2171 return;
2174 /* During the HCI setup phase, a few error conditions are
2175 * ignored and they need to be checked now. If they are still
2176 * valid, it is important to turn the device back off.
2178 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2179 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2180 (hdev->dev_type == HCI_BREDR &&
2181 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2182 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2183 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2184 hci_dev_do_close(hdev);
2185 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2186 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2187 HCI_AUTO_OFF_TIMEOUT);
2190 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2191 /* For unconfigured devices, set the HCI_RAW flag
2192 * so that userspace can easily identify them.
2194 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2195 set_bit(HCI_RAW, &hdev->flags);
2197 /* For fully configured devices, this will send
2198 * the Index Added event. For unconfigured devices,
2199 * it will send an Unconfigured Index Added event.
2201 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2202 * and no event will be sent.
2204 mgmt_index_added(hdev);
2205 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2206 /* When the controller is now configured, then it
2207 * is important to clear the HCI_RAW flag.
2209 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2210 clear_bit(HCI_RAW, &hdev->flags);
2212 /* Powering on the controller with HCI_CONFIG set only
2213 * happens with the transition from unconfigured to
2214 * configured. This will send the Index Added event.
2216 mgmt_index_added(hdev);
2220 static void hci_power_off(struct work_struct *work)
2222 struct hci_dev *hdev = container_of(work, struct hci_dev,
2223 power_off.work);
2225 BT_DBG("%s", hdev->name);
2227 hci_dev_do_close(hdev);
2230 static void hci_error_reset(struct work_struct *work)
2232 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2234 BT_DBG("%s", hdev->name);
2236 if (hdev->hw_error)
2237 hdev->hw_error(hdev, hdev->hw_error_code);
2238 else
2239 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2240 hdev->hw_error_code);
2242 if (hci_dev_do_close(hdev))
2243 return;
2245 hci_dev_do_open(hdev);
2248 static void hci_discov_off(struct work_struct *work)
2250 struct hci_dev *hdev;
2252 hdev = container_of(work, struct hci_dev, discov_off.work);
2254 BT_DBG("%s", hdev->name);
2256 mgmt_discoverable_timeout(hdev);
2259 static void hci_adv_timeout_expire(struct work_struct *work)
2261 struct hci_dev *hdev;
2263 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2265 BT_DBG("%s", hdev->name);
2267 mgmt_adv_timeout_expired(hdev);
2270 void hci_uuids_clear(struct hci_dev *hdev)
2272 struct bt_uuid *uuid, *tmp;
2274 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2275 list_del(&uuid->list);
2276 kfree(uuid);
2280 void hci_link_keys_clear(struct hci_dev *hdev)
2282 struct link_key *key;
2284 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2285 list_del_rcu(&key->list);
2286 kfree_rcu(key, rcu);
2290 void hci_smp_ltks_clear(struct hci_dev *hdev)
2292 struct smp_ltk *k;
2294 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2295 list_del_rcu(&k->list);
2296 kfree_rcu(k, rcu);
2300 void hci_smp_irks_clear(struct hci_dev *hdev)
2302 struct smp_irk *k;
2304 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2305 list_del_rcu(&k->list);
2306 kfree_rcu(k, rcu);
2310 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2312 struct link_key *k;
2314 rcu_read_lock();
2315 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2316 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2317 rcu_read_unlock();
2318 return k;
2321 rcu_read_unlock();
2323 return NULL;
2326 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2327 u8 key_type, u8 old_key_type)
2329 /* Legacy key */
2330 if (key_type < 0x03)
2331 return true;
2333 /* Debug keys are insecure so don't store them persistently */
2334 if (key_type == HCI_LK_DEBUG_COMBINATION)
2335 return false;
2337 /* Changed combination key and there's no previous one */
2338 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2339 return false;
2341 /* Security mode 3 case */
2342 if (!conn)
2343 return true;
2345 /* BR/EDR key derived using SC from an LE link */
2346 if (conn->type == LE_LINK)
2347 return true;
2349 /* Neither local nor remote side had no-bonding as requirement */
2350 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2351 return true;
2353 /* Local side had dedicated bonding as requirement */
2354 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2355 return true;
2357 /* Remote side had dedicated bonding as requirement */
2358 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2359 return true;
2361 /* If none of the above criteria match, then don't store the key
2362 * persistently */
2363 return false;
2366 static u8 ltk_role(u8 type)
2368 if (type == SMP_LTK)
2369 return HCI_ROLE_MASTER;
2371 return HCI_ROLE_SLAVE;
2374 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2375 u8 addr_type, u8 role)
2377 struct smp_ltk *k;
2379 rcu_read_lock();
2380 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2381 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2382 continue;
2384 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2385 rcu_read_unlock();
2386 return k;
2389 rcu_read_unlock();
2391 return NULL;
2394 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2396 struct smp_irk *irk;
2398 rcu_read_lock();
2399 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2400 if (!bacmp(&irk->rpa, rpa)) {
2401 rcu_read_unlock();
2402 return irk;
2406 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2407 if (smp_irk_matches(hdev, irk->val, rpa)) {
2408 bacpy(&irk->rpa, rpa);
2409 rcu_read_unlock();
2410 return irk;
2413 rcu_read_unlock();
2415 return NULL;
2418 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2419 u8 addr_type)
2421 struct smp_irk *irk;
2423 /* Identity Address must be public or static random */
2424 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2425 return NULL;
2427 rcu_read_lock();
2428 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2429 if (addr_type == irk->addr_type &&
2430 bacmp(bdaddr, &irk->bdaddr) == 0) {
2431 rcu_read_unlock();
2432 return irk;
2435 rcu_read_unlock();
2437 return NULL;
2440 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2441 bdaddr_t *bdaddr, u8 *val, u8 type,
2442 u8 pin_len, bool *persistent)
2444 struct link_key *key, *old_key;
2445 u8 old_key_type;
2447 old_key = hci_find_link_key(hdev, bdaddr);
2448 if (old_key) {
2449 old_key_type = old_key->type;
2450 key = old_key;
2451 } else {
2452 old_key_type = conn ? conn->key_type : 0xff;
2453 key = kzalloc(sizeof(*key), GFP_KERNEL);
2454 if (!key)
2455 return NULL;
2456 list_add_rcu(&key->list, &hdev->link_keys);
2459 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2461 /* Some buggy controller combinations generate a changed
2462 * combination key for legacy pairing even when there's no
2463 * previous key */
2464 if (type == HCI_LK_CHANGED_COMBINATION &&
2465 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2466 type = HCI_LK_COMBINATION;
2467 if (conn)
2468 conn->key_type = type;
2471 bacpy(&key->bdaddr, bdaddr);
2472 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2473 key->pin_len = pin_len;
2475 if (type == HCI_LK_CHANGED_COMBINATION)
2476 key->type = old_key_type;
2477 else
2478 key->type = type;
2480 if (persistent)
2481 *persistent = hci_persistent_key(hdev, conn, type,
2482 old_key_type);
2484 return key;
2487 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2488 u8 addr_type, u8 type, u8 authenticated,
2489 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2491 struct smp_ltk *key, *old_key;
2492 u8 role = ltk_role(type);
2494 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2495 if (old_key)
2496 key = old_key;
2497 else {
2498 key = kzalloc(sizeof(*key), GFP_KERNEL);
2499 if (!key)
2500 return NULL;
2501 list_add_rcu(&key->list, &hdev->long_term_keys);
2504 bacpy(&key->bdaddr, bdaddr);
2505 key->bdaddr_type = addr_type;
2506 memcpy(key->val, tk, sizeof(key->val));
2507 key->authenticated = authenticated;
2508 key->ediv = ediv;
2509 key->rand = rand;
2510 key->enc_size = enc_size;
2511 key->type = type;
2513 return key;
2516 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2517 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2519 struct smp_irk *irk;
2521 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2522 if (!irk) {
2523 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2524 if (!irk)
2525 return NULL;
2527 bacpy(&irk->bdaddr, bdaddr);
2528 irk->addr_type = addr_type;
2530 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2533 memcpy(irk->val, val, 16);
2534 bacpy(&irk->rpa, rpa);
2536 return irk;
2539 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2541 struct link_key *key;
2543 key = hci_find_link_key(hdev, bdaddr);
2544 if (!key)
2545 return -ENOENT;
2547 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2549 list_del_rcu(&key->list);
2550 kfree_rcu(key, rcu);
2552 return 0;
2555 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2557 struct smp_ltk *k;
2558 int removed = 0;
2560 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2561 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2562 continue;
2564 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2566 list_del_rcu(&k->list);
2567 kfree_rcu(k, rcu);
2568 removed++;
2571 return removed ? 0 : -ENOENT;
2574 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2576 struct smp_irk *k;
2578 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2579 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2580 continue;
2582 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2584 list_del_rcu(&k->list);
2585 kfree_rcu(k, rcu);
2589 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2591 struct smp_ltk *k;
2592 struct smp_irk *irk;
2593 u8 addr_type;
2595 if (type == BDADDR_BREDR) {
2596 if (hci_find_link_key(hdev, bdaddr))
2597 return true;
2598 return false;
2601 /* Convert to HCI addr type which struct smp_ltk uses */
2602 if (type == BDADDR_LE_PUBLIC)
2603 addr_type = ADDR_LE_DEV_PUBLIC;
2604 else
2605 addr_type = ADDR_LE_DEV_RANDOM;
2607 irk = hci_get_irk(hdev, bdaddr, addr_type);
2608 if (irk) {
2609 bdaddr = &irk->bdaddr;
2610 addr_type = irk->addr_type;
2613 rcu_read_lock();
2614 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2615 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2616 rcu_read_unlock();
2617 return true;
2620 rcu_read_unlock();
2622 return false;
2625 /* HCI command timer function */
2626 static void hci_cmd_timeout(struct work_struct *work)
2628 struct hci_dev *hdev = container_of(work, struct hci_dev,
2629 cmd_timer.work);
2631 if (hdev->sent_cmd) {
2632 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2633 u16 opcode = __le16_to_cpu(sent->opcode);
2635 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2636 } else {
2637 BT_ERR("%s command tx timeout", hdev->name);
2640 atomic_set(&hdev->cmd_cnt, 1);
2641 queue_work(hdev->workqueue, &hdev->cmd_work);
2644 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2645 bdaddr_t *bdaddr, u8 bdaddr_type)
2647 struct oob_data *data;
2649 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2650 if (bacmp(bdaddr, &data->bdaddr) != 0)
2651 continue;
2652 if (data->bdaddr_type != bdaddr_type)
2653 continue;
2654 return data;
2657 return NULL;
2660 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 bdaddr_type)
2663 struct oob_data *data;
2665 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2666 if (!data)
2667 return -ENOENT;
2669 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2671 list_del(&data->list);
2672 kfree(data);
2674 return 0;
2677 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2679 struct oob_data *data, *n;
2681 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2682 list_del(&data->list);
2683 kfree(data);
2687 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2688 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2689 u8 *hash256, u8 *rand256)
2691 struct oob_data *data;
2693 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2694 if (!data) {
2695 data = kmalloc(sizeof(*data), GFP_KERNEL);
2696 if (!data)
2697 return -ENOMEM;
2699 bacpy(&data->bdaddr, bdaddr);
2700 data->bdaddr_type = bdaddr_type;
2701 list_add(&data->list, &hdev->remote_oob_data);
2704 if (hash192 && rand192) {
2705 memcpy(data->hash192, hash192, sizeof(data->hash192));
2706 memcpy(data->rand192, rand192, sizeof(data->rand192));
2707 if (hash256 && rand256)
2708 data->present = 0x03;
2709 } else {
2710 memset(data->hash192, 0, sizeof(data->hash192));
2711 memset(data->rand192, 0, sizeof(data->rand192));
2712 if (hash256 && rand256)
2713 data->present = 0x02;
2714 else
2715 data->present = 0x00;
2718 if (hash256 && rand256) {
2719 memcpy(data->hash256, hash256, sizeof(data->hash256));
2720 memcpy(data->rand256, rand256, sizeof(data->rand256));
2721 } else {
2722 memset(data->hash256, 0, sizeof(data->hash256));
2723 memset(data->rand256, 0, sizeof(data->rand256));
2724 if (hash192 && rand192)
2725 data->present = 0x01;
2728 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2730 return 0;
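/* Summary of the "present" encoding set above:
 *   0x00 - entry exists but carries no usable OOB values
 *   0x01 - P-192 hash/rand only
 *   0x02 - P-256 hash/rand only
 *   0x03 - both P-192 and P-256 values
 */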
2733 /* This function requires the caller holds hdev->lock */
2734 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2736 struct adv_info *adv_instance;
2738 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2739 if (adv_instance->instance == instance)
2740 return adv_instance;
2743 return NULL;
2746 /* This function requires the caller holds hdev->lock */
2747 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2748 struct adv_info *cur_instance;
2750 cur_instance = hci_find_adv_instance(hdev, instance);
2751 if (!cur_instance)
2752 return NULL;
2754 if (cur_instance == list_last_entry(&hdev->adv_instances,
2755 struct adv_info, list))
2756 return list_first_entry(&hdev->adv_instances,
2757 struct adv_info, list);
2758 else
2759 return list_next_entry(cur_instance, list);
2762 /* This function requires the caller holds hdev->lock */
2763 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2765 struct adv_info *adv_instance;
2767 adv_instance = hci_find_adv_instance(hdev, instance);
2768 if (!adv_instance)
2769 return -ENOENT;
2771 BT_DBG("%s removing %d", hdev->name, instance);
2773 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2774 cancel_delayed_work(&hdev->adv_instance_expire);
2775 hdev->adv_instance_timeout = 0;
2778 list_del(&adv_instance->list);
2779 kfree(adv_instance);
2781 hdev->adv_instance_cnt--;
2783 return 0;
2786 /* This function requires the caller holds hdev->lock */
2787 void hci_adv_instances_clear(struct hci_dev *hdev)
2789 struct adv_info *adv_instance, *n;
2791 if (hdev->adv_instance_timeout) {
2792 cancel_delayed_work(&hdev->adv_instance_expire);
2793 hdev->adv_instance_timeout = 0;
2796 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2797 list_del(&adv_instance->list);
2798 kfree(adv_instance);
2801 hdev->adv_instance_cnt = 0;
2804 /* This function requires the caller holds hdev->lock */
2805 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2806 u16 adv_data_len, u8 *adv_data,
2807 u16 scan_rsp_len, u8 *scan_rsp_data,
2808 u16 timeout, u16 duration)
2810 struct adv_info *adv_instance;
2812 adv_instance = hci_find_adv_instance(hdev, instance);
2813 if (adv_instance) {
2814 memset(adv_instance->adv_data, 0,
2815 sizeof(adv_instance->adv_data));
2816 memset(adv_instance->scan_rsp_data, 0,
2817 sizeof(adv_instance->scan_rsp_data));
2818 } else {
2819 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2820 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2821 return -EOVERFLOW;
2823 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2824 if (!adv_instance)
2825 return -ENOMEM;
2827 adv_instance->pending = true;
2828 adv_instance->instance = instance;
2829 list_add(&adv_instance->list, &hdev->adv_instances);
2830 hdev->adv_instance_cnt++;
2833 adv_instance->flags = flags;
2834 adv_instance->adv_data_len = adv_data_len;
2835 adv_instance->scan_rsp_len = scan_rsp_len;
2837 if (adv_data_len)
2838 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2840 if (scan_rsp_len)
2841 memcpy(adv_instance->scan_rsp_data,
2842 scan_rsp_data, scan_rsp_len);
2844 adv_instance->timeout = timeout;
2845 adv_instance->remaining_time = timeout;
2847 if (duration == 0)
2848 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2849 else
2850 adv_instance->duration = duration;
2852 BT_DBG("%s for %d", hdev->name, instance);
2854 return 0;
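/* A minimal sketch (not taken from this file) of adding a single
 * advertising instance with the helper above; the AD payload and the
 * 30 second timeout are hypothetical, and hdev->lock must already be
 * held as noted in the function comment:
 *
 *	 Flags AD (LE General Discoverable Mode)
 *	u8 adv_data[] = { 0x02, 0x01, 0x06 };
 *	int err;
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
 *				   0, NULL, 30, 0);
 *	if (err)
 *		return err;
 */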
2857 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2858 bdaddr_t *bdaddr, u8 type)
2860 struct bdaddr_list *b;
2862 list_for_each_entry(b, bdaddr_list, list) {
2863 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2864 return b;
2867 return NULL;
2870 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2872 struct list_head *p, *n;
2874 list_for_each_safe(p, n, bdaddr_list) {
2875 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2877 list_del(p);
2878 kfree(b);
2882 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2884 struct bdaddr_list *entry;
2886 if (!bacmp(bdaddr, BDADDR_ANY))
2887 return -EBADF;
2889 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2890 return -EEXIST;
2892 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2893 if (!entry)
2894 return -ENOMEM;
2896 bacpy(&entry->bdaddr, bdaddr);
2897 entry->bdaddr_type = type;
2899 list_add(&entry->list, list);
2901 return 0;
2904 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2906 struct bdaddr_list *entry;
2908 if (!bacmp(bdaddr, BDADDR_ANY)) {
2909 hci_bdaddr_list_clear(list);
2910 return 0;
2913 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2914 if (!entry)
2915 return -ENOENT;
2917 list_del(&entry->list);
2918 kfree(entry);
2920 return 0;
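/* A minimal sketch (not taken from this file) of the list helpers above,
 * here against hdev->whitelist with a hypothetical BR/EDR address:
 *
 *	bdaddr_t peer = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &peer, BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		return err;
 *
 *	if (hci_bdaddr_list_lookup(&hdev->whitelist, &peer, BDADDR_BREDR))
 *		hci_bdaddr_list_del(&hdev->whitelist, &peer, BDADDR_BREDR);
 */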
2923 /* This function requires the caller holds hdev->lock */
2924 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2925 bdaddr_t *addr, u8 addr_type)
2927 struct hci_conn_params *params;
2929 list_for_each_entry(params, &hdev->le_conn_params, list) {
2930 if (bacmp(&params->addr, addr) == 0 &&
2931 params->addr_type == addr_type) {
2932 return params;
2936 return NULL;
2939 /* This function requires the caller holds hdev->lock */
2940 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2941 bdaddr_t *addr, u8 addr_type)
2943 struct hci_conn_params *param;
2945 list_for_each_entry(param, list, action) {
2946 if (bacmp(&param->addr, addr) == 0 &&
2947 param->addr_type == addr_type)
2948 return param;
2951 return NULL;
2954 /* This function requires the caller holds hdev->lock */
2955 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2956 bdaddr_t *addr, u8 addr_type)
2958 struct hci_conn_params *params;
2960 params = hci_conn_params_lookup(hdev, addr, addr_type);
2961 if (params)
2962 return params;
2964 params = kzalloc(sizeof(*params), GFP_KERNEL);
2965 if (!params) {
2966 BT_ERR("Out of memory");
2967 return NULL;
2970 bacpy(&params->addr, addr);
2971 params->addr_type = addr_type;
2973 list_add(&params->list, &hdev->le_conn_params);
2974 INIT_LIST_HEAD(&params->action);
2976 params->conn_min_interval = hdev->le_conn_min_interval;
2977 params->conn_max_interval = hdev->le_conn_max_interval;
2978 params->conn_latency = hdev->le_conn_latency;
2979 params->supervision_timeout = hdev->le_supv_timeout;
2980 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2982 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2984 return params;
2987 static void hci_conn_params_free(struct hci_conn_params *params)
2989 if (params->conn) {
2990 hci_conn_drop(params->conn);
2991 hci_conn_put(params->conn);
2994 list_del(&params->action);
2995 list_del(&params->list);
2996 kfree(params);
2999 /* This function requires the caller holds hdev->lock */
3000 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3002 struct hci_conn_params *params;
3004 params = hci_conn_params_lookup(hdev, addr, addr_type);
3005 if (!params)
3006 return;
3008 hci_conn_params_free(params);
3010 hci_update_background_scan(hdev);
3012 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3015 /* This function requires the caller holds hdev->lock */
3016 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3018 struct hci_conn_params *params, *tmp;
3020 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3021 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3022 continue;
3024 /* If trying to establish a one-time connection to a disabled
3025 * device, leave the params, but mark them as just once.
3027 if (params->explicit_connect) {
3028 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3029 continue;
3032 list_del(&params->list);
3033 kfree(params);
3036 BT_DBG("All LE disabled connection parameters were removed");
3039 /* This function requires the caller holds hdev->lock */
3040 void hci_conn_params_clear_all(struct hci_dev *hdev)
3042 struct hci_conn_params *params, *tmp;
3044 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3045 hci_conn_params_free(params);
3047 hci_update_background_scan(hdev);
3049 BT_DBG("All LE connection parameters were removed");
3052 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3054 if (status) {
3055 BT_ERR("Failed to start inquiry: status %d", status);
3057 hci_dev_lock(hdev);
3058 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3059 hci_dev_unlock(hdev);
3060 return;
3064 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3065 u16 opcode)
3067 /* General inquiry access code (GIAC) */
3068 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3069 struct hci_cp_inquiry cp;
3070 int err;
3072 if (status) {
3073 BT_ERR("Failed to disable LE scanning: status %d", status);
3074 return;
3077 hdev->discovery.scan_start = 0;
3079 switch (hdev->discovery.type) {
3080 case DISCOV_TYPE_LE:
3081 hci_dev_lock(hdev);
3082 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3083 hci_dev_unlock(hdev);
3084 break;
3086 case DISCOV_TYPE_INTERLEAVED:
3087 hci_dev_lock(hdev);
3089 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3090 &hdev->quirks)) {
3091 /* If we were running LE only scan, change discovery
3092 * state. If we were running both LE and BR/EDR inquiry
3093 * simultaneously, and BR/EDR inquiry is already
3094 * finished, stop discovery, otherwise BR/EDR inquiry
3095 * will stop discovery when finished. If we are resolving a
3096 * remote device name, do not change discovery state.
3098 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3099 hdev->discovery.state != DISCOVERY_RESOLVING)
3100 hci_discovery_set_state(hdev,
3101 DISCOVERY_STOPPED);
3102 } else {
3103 struct hci_request req;
3105 hci_inquiry_cache_flush(hdev);
3107 hci_req_init(&req, hdev);
3109 memset(&cp, 0, sizeof(cp));
3110 memcpy(&cp.lap, lap, sizeof(cp.lap));
3111 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3112 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3114 err = hci_req_run(&req, inquiry_complete);
3115 if (err) {
3116 BT_ERR("Inquiry request failed: err %d", err);
3117 hci_discovery_set_state(hdev,
3118 DISCOVERY_STOPPED);
3122 hci_dev_unlock(hdev);
3123 break;
3127 static void le_scan_disable_work(struct work_struct *work)
3129 struct hci_dev *hdev = container_of(work, struct hci_dev,
3130 le_scan_disable.work);
3131 struct hci_request req;
3132 int err;
3134 BT_DBG("%s", hdev->name);
3136 cancel_delayed_work_sync(&hdev->le_scan_restart);
3138 hci_req_init(&req, hdev);
3140 hci_req_add_le_scan_disable(&req);
3142 err = hci_req_run(&req, le_scan_disable_work_complete);
3143 if (err)
3144 BT_ERR("Disable LE scanning request failed: err %d", err);
3147 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3148 u16 opcode)
3150 unsigned long timeout, duration, scan_start, now;
3152 BT_DBG("%s", hdev->name);
3154 if (status) {
3155 BT_ERR("Failed to restart LE scan: status %d", status);
3156 return;
3159 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3160 !hdev->discovery.scan_start)
3161 return;
3163 /* When the scan was started, hdev->le_scan_disable has been queued
3164 * after duration from scan_start. During scan restart this job
3165 * has been canceled, and we need to queue it again after proper
3166 * timeout, to make sure that scan does not run indefinitely.
3168 duration = hdev->discovery.scan_duration;
3169 scan_start = hdev->discovery.scan_start;
3170 now = jiffies;
3171 if (now - scan_start <= duration) {
3172 int elapsed;
3174 if (now >= scan_start)
3175 elapsed = now - scan_start;
3176 else
3177 elapsed = ULONG_MAX - scan_start + now;
3179 timeout = duration - elapsed;
3180 } else {
3181 timeout = 0;
3183 queue_delayed_work(hdev->workqueue,
3184 &hdev->le_scan_disable, timeout);
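/* Worked example of the requeue above: with a scan_duration of 10000
 * jiffies and a restart 3000 jiffies after scan_start, elapsed is 3000
 * and le_scan_disable is queued again with a timeout of 7000 jiffies,
 * so the total scanning time still matches the original duration. If
 * the restart happens after the duration has already elapsed, timeout
 * is 0 and scanning is disabled immediately.
 */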
3187 static void le_scan_restart_work(struct work_struct *work)
3189 struct hci_dev *hdev = container_of(work, struct hci_dev,
3190 le_scan_restart.work);
3191 struct hci_request req;
3192 struct hci_cp_le_set_scan_enable cp;
3193 int err;
3195 BT_DBG("%s", hdev->name);
3197 /* If controller is not scanning we are done. */
3198 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3199 return;
3201 hci_req_init(&req, hdev);
3203 hci_req_add_le_scan_disable(&req);
3205 memset(&cp, 0, sizeof(cp));
3206 cp.enable = LE_SCAN_ENABLE;
3207 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3208 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3210 err = hci_req_run(&req, le_scan_restart_work_complete);
3211 if (err)
3212 BT_ERR("Restart LE scan request failed: err %d", err);
3215 /* Copy the Identity Address of the controller.
3217 * If the controller has a public BD_ADDR, then by default use that one.
3218 * If this is a LE only controller without a public address, default to
3219 * the static random address.
3221 * For debugging purposes it is possible to force controllers with a
3222 * public address to use the static random address instead.
3224 * In case BR/EDR has been disabled on a dual-mode controller and
3225 * userspace has configured a static address, then that address
3226 * becomes the identity address instead of the public BR/EDR address.
3228 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3229 u8 *bdaddr_type)
3231 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3232 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3233 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3234 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3235 bacpy(bdaddr, &hdev->static_addr);
3236 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3237 } else {
3238 bacpy(bdaddr, &hdev->bdaddr);
3239 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3243 /* Alloc HCI device */
3244 struct hci_dev *hci_alloc_dev(void)
3246 struct hci_dev *hdev;
3248 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3249 if (!hdev)
3250 return NULL;
3252 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3253 hdev->esco_type = (ESCO_HV1);
3254 hdev->link_mode = (HCI_LM_ACCEPT);
3255 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3256 hdev->io_capability = 0x03; /* No Input No Output */
3257 hdev->manufacturer = 0xffff; /* Default to internal use */
3258 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3259 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3260 hdev->adv_instance_cnt = 0;
3261 hdev->cur_adv_instance = 0x00;
3262 hdev->adv_instance_timeout = 0;
3264 hdev->sniff_max_interval = 800;
3265 hdev->sniff_min_interval = 80;
3267 hdev->le_adv_channel_map = 0x07;
3268 hdev->le_adv_min_interval = 0x0800;
3269 hdev->le_adv_max_interval = 0x0800;
3270 hdev->le_scan_interval = 0x0060;
3271 hdev->le_scan_window = 0x0030;
3272 hdev->le_conn_min_interval = 0x0028;
3273 hdev->le_conn_max_interval = 0x0038;
3274 hdev->le_conn_latency = 0x0000;
3275 hdev->le_supv_timeout = 0x002a;
3276 hdev->le_def_tx_len = 0x001b;
3277 hdev->le_def_tx_time = 0x0148;
3278 hdev->le_max_tx_len = 0x001b;
3279 hdev->le_max_tx_time = 0x0148;
3280 hdev->le_max_rx_len = 0x001b;
3281 hdev->le_max_rx_time = 0x0148;
3283 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3284 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3285 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3286 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3288 mutex_init(&hdev->lock);
3289 mutex_init(&hdev->req_lock);
3291 INIT_LIST_HEAD(&hdev->mgmt_pending);
3292 INIT_LIST_HEAD(&hdev->blacklist);
3293 INIT_LIST_HEAD(&hdev->whitelist);
3294 INIT_LIST_HEAD(&hdev->uuids);
3295 INIT_LIST_HEAD(&hdev->link_keys);
3296 INIT_LIST_HEAD(&hdev->long_term_keys);
3297 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3298 INIT_LIST_HEAD(&hdev->remote_oob_data);
3299 INIT_LIST_HEAD(&hdev->le_white_list);
3300 INIT_LIST_HEAD(&hdev->le_conn_params);
3301 INIT_LIST_HEAD(&hdev->pend_le_conns);
3302 INIT_LIST_HEAD(&hdev->pend_le_reports);
3303 INIT_LIST_HEAD(&hdev->conn_hash.list);
3304 INIT_LIST_HEAD(&hdev->adv_instances);
3306 INIT_WORK(&hdev->rx_work, hci_rx_work);
3307 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3308 INIT_WORK(&hdev->tx_work, hci_tx_work);
3309 INIT_WORK(&hdev->power_on, hci_power_on);
3310 INIT_WORK(&hdev->error_reset, hci_error_reset);
3312 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3313 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3314 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3315 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3316 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3318 skb_queue_head_init(&hdev->rx_q);
3319 skb_queue_head_init(&hdev->cmd_q);
3320 skb_queue_head_init(&hdev->raw_q);
3322 init_waitqueue_head(&hdev->req_wait_q);
3324 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3326 hci_init_sysfs(hdev);
3327 discovery_init(hdev);
3329 return hdev;
3331 EXPORT_SYMBOL(hci_alloc_dev);
3333 /* Free HCI device */
3334 void hci_free_dev(struct hci_dev *hdev)
3336 /* will free via device release */
3337 put_device(&hdev->dev);
3339 EXPORT_SYMBOL(hci_free_dev);
3341 /* Register HCI device */
3342 int hci_register_dev(struct hci_dev *hdev)
3344 int id, error;
3346 if (!hdev->open || !hdev->close || !hdev->send)
3347 return -EINVAL;
3349 /* Do not allow HCI_AMP devices to register at index 0,
3350 * so the index can be used as the AMP controller ID.
3352 switch (hdev->dev_type) {
3353 case HCI_BREDR:
3354 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3355 break;
3356 case HCI_AMP:
3357 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3358 break;
3359 default:
3360 return -EINVAL;
3363 if (id < 0)
3364 return id;
3366 sprintf(hdev->name, "hci%d", id);
3367 hdev->id = id;
3369 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3371 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3372 WQ_MEM_RECLAIM, 1, hdev->name);
3373 if (!hdev->workqueue) {
3374 error = -ENOMEM;
3375 goto err;
3378 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3379 WQ_MEM_RECLAIM, 1, hdev->name);
3380 if (!hdev->req_workqueue) {
3381 destroy_workqueue(hdev->workqueue);
3382 error = -ENOMEM;
3383 goto err;
3386 if (!IS_ERR_OR_NULL(bt_debugfs))
3387 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3389 dev_set_name(&hdev->dev, "%s", hdev->name);
3391 error = device_add(&hdev->dev);
3392 if (error < 0)
3393 goto err_wqueue;
3395 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3396 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3397 hdev);
3398 if (hdev->rfkill) {
3399 if (rfkill_register(hdev->rfkill) < 0) {
3400 rfkill_destroy(hdev->rfkill);
3401 hdev->rfkill = NULL;
3405 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3406 hci_dev_set_flag(hdev, HCI_RFKILLED);
3408 hci_dev_set_flag(hdev, HCI_SETUP);
3409 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3411 if (hdev->dev_type == HCI_BREDR) {
3412 /* Assume BR/EDR support until proven otherwise (such as
3413 * through reading supported features during init).
3415 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3418 write_lock(&hci_dev_list_lock);
3419 list_add(&hdev->list, &hci_dev_list);
3420 write_unlock(&hci_dev_list_lock);
3422 /* Devices that are marked for raw-only usage are unconfigured
3423 * and should not be included in normal operation.
3425 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3426 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3428 hci_sock_dev_event(hdev, HCI_DEV_REG);
3429 hci_dev_hold(hdev);
3431 queue_work(hdev->req_workqueue, &hdev->power_on);
3433 return id;
3435 err_wqueue:
3436 destroy_workqueue(hdev->workqueue);
3437 destroy_workqueue(hdev->req_workqueue);
3438 err:
3439 ida_simple_remove(&hci_index_ida, hdev->id);
3441 return error;
3443 EXPORT_SYMBOL(hci_register_dev);
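/* A minimal registration sketch (not taken from this file) showing how a
 * transport driver typically uses hci_alloc_dev()/hci_register_dev();
 * my_open, my_close, my_send and my_data are hypothetical names, and only
 * the ->open, ->close and ->send callbacks are mandatory, as checked above:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	hci_set_drvdata(hdev, my_data);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 *	return err;
 */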
3445 /* Unregister HCI device */
3446 void hci_unregister_dev(struct hci_dev *hdev)
3448 int id;
3450 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3452 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3454 id = hdev->id;
3456 write_lock(&hci_dev_list_lock);
3457 list_del(&hdev->list);
3458 write_unlock(&hci_dev_list_lock);
3460 hci_dev_do_close(hdev);
3462 cancel_work_sync(&hdev->power_on);
3464 if (!test_bit(HCI_INIT, &hdev->flags) &&
3465 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3466 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3467 hci_dev_lock(hdev);
3468 mgmt_index_removed(hdev);
3469 hci_dev_unlock(hdev);
3472 /* mgmt_index_removed should take care of emptying the
3473 * pending list */
3474 BUG_ON(!list_empty(&hdev->mgmt_pending));
3476 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3478 if (hdev->rfkill) {
3479 rfkill_unregister(hdev->rfkill);
3480 rfkill_destroy(hdev->rfkill);
3483 device_del(&hdev->dev);
3485 debugfs_remove_recursive(hdev->debugfs);
3487 destroy_workqueue(hdev->workqueue);
3488 destroy_workqueue(hdev->req_workqueue);
3490 hci_dev_lock(hdev);
3491 hci_bdaddr_list_clear(&hdev->blacklist);
3492 hci_bdaddr_list_clear(&hdev->whitelist);
3493 hci_uuids_clear(hdev);
3494 hci_link_keys_clear(hdev);
3495 hci_smp_ltks_clear(hdev);
3496 hci_smp_irks_clear(hdev);
3497 hci_remote_oob_data_clear(hdev);
3498 hci_adv_instances_clear(hdev);
3499 hci_bdaddr_list_clear(&hdev->le_white_list);
3500 hci_conn_params_clear_all(hdev);
3501 hci_discovery_filter_clear(hdev);
3502 hci_dev_unlock(hdev);
3504 hci_dev_put(hdev);
3506 ida_simple_remove(&hci_index_ida, id);
3508 EXPORT_SYMBOL(hci_unregister_dev);
3510 /* Suspend HCI device */
3511 int hci_suspend_dev(struct hci_dev *hdev)
3513 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3514 return 0;
3516 EXPORT_SYMBOL(hci_suspend_dev);
3518 /* Resume HCI device */
3519 int hci_resume_dev(struct hci_dev *hdev)
3521 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3522 return 0;
3524 EXPORT_SYMBOL(hci_resume_dev);
3526 /* Reset HCI device */
3527 int hci_reset_dev(struct hci_dev *hdev)
3529 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3530 struct sk_buff *skb;
3532 skb = bt_skb_alloc(3, GFP_ATOMIC);
3533 if (!skb)
3534 return -ENOMEM;
3536 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3537 memcpy(skb_put(skb, 3), hw_err, 3);
3539 /* Send Hardware Error to upper stack */
3540 return hci_recv_frame(hdev, skb);
3542 EXPORT_SYMBOL(hci_reset_dev);
3544 /* Receive frame from HCI drivers */
3545 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3547 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3548 && !test_bit(HCI_INIT, &hdev->flags))) {
3549 kfree_skb(skb);
3550 return -ENXIO;
3553 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3554 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3555 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3556 kfree_skb(skb);
3557 return -EINVAL;
3560 /* Incoming skb */
3561 bt_cb(skb)->incoming = 1;
3563 /* Time stamp */
3564 __net_timestamp(skb);
3566 skb_queue_tail(&hdev->rx_q, skb);
3567 queue_work(hdev->workqueue, &hdev->rx_work);
3569 return 0;
3571 EXPORT_SYMBOL(hci_recv_frame);
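/* A minimal sketch (not taken from this file) of a driver handing a
 * received event to the core; buf and len come from the transport and
 * are assumptions here. The packet type must be one of the three types
 * accepted by hci_recv_frame() above:
 *
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	memcpy(skb_put(skb, len), buf, len);
 *
 *	return hci_recv_frame(hdev, skb);
 */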
3573 /* Receive diagnostic message from HCI drivers */
3574 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3576 /* Mark as diagnostic packet */
3577 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3579 /* Time stamp */
3580 __net_timestamp(skb);
3582 skb_queue_tail(&hdev->rx_q, skb);
3583 queue_work(hdev->workqueue, &hdev->rx_work);
3585 return 0;
3587 EXPORT_SYMBOL(hci_recv_diag);
3589 /* ---- Interface to upper protocols ---- */
3591 int hci_register_cb(struct hci_cb *cb)
3593 BT_DBG("%p name %s", cb, cb->name);
3595 mutex_lock(&hci_cb_list_lock);
3596 list_add_tail(&cb->list, &hci_cb_list);
3597 mutex_unlock(&hci_cb_list_lock);
3599 return 0;
3601 EXPORT_SYMBOL(hci_register_cb);
3603 int hci_unregister_cb(struct hci_cb *cb)
3605 BT_DBG("%p name %s", cb, cb->name);
3607 mutex_lock(&hci_cb_list_lock);
3608 list_del(&cb->list);
3609 mutex_unlock(&hci_cb_list_lock);
3611 return 0;
3613 EXPORT_SYMBOL(hci_unregister_cb);
3615 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3617 int err;
3619 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3621 /* Time stamp */
3622 __net_timestamp(skb);
3624 /* Send copy to monitor */
3625 hci_send_to_monitor(hdev, skb);
3627 if (atomic_read(&hdev->promisc)) {
3628 /* Send copy to the sockets */
3629 hci_send_to_sock(hdev, skb);
3632 /* Get rid of skb owner, prior to sending to the driver. */
3633 skb_orphan(skb);
3635 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3636 kfree_skb(skb);
3637 return;
3640 err = hdev->send(hdev, skb);
3641 if (err < 0) {
3642 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3643 kfree_skb(skb);
3647 /* Send HCI command */
3648 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3649 const void *param)
3651 struct sk_buff *skb;
3653 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3655 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3656 if (!skb) {
3657 BT_ERR("%s no memory for command", hdev->name);
3658 return -ENOMEM;
3661 /* Stand-alone HCI commands must be flagged as
3662 * single-command requests.
3664 bt_cb(skb)->hci.req_start = true;
3666 skb_queue_tail(&hdev->cmd_q, skb);
3667 queue_work(hdev->workqueue, &hdev->cmd_work);
3669 return 0;
3672 /* Get data from the previously sent command */
3673 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3675 struct hci_command_hdr *hdr;
3677 if (!hdev->sent_cmd)
3678 return NULL;
3680 hdr = (void *) hdev->sent_cmd->data;
3682 if (hdr->opcode != cpu_to_le16(opcode))
3683 return NULL;
3685 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3687 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3690 /* Send HCI command and wait for command complete event */
3691 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3692 const void *param, u32 timeout)
3694 struct sk_buff *skb;
3696 if (!test_bit(HCI_UP, &hdev->flags))
3697 return ERR_PTR(-ENETDOWN);
3699 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3701 hci_req_lock(hdev);
3702 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3703 hci_req_unlock(hdev);
3705 return skb;
3707 EXPORT_SYMBOL(hci_cmd_sync);
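/* A minimal sketch (not taken from this file) of a synchronous command,
 * using Read Local Version Information as an example; the returned skb
 * carries the Command Complete return parameters:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	 skb->data points at struct hci_rp_read_local_version here
 *	kfree_skb(skb);
 */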
3709 /* Send ACL data */
3710 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3712 struct hci_acl_hdr *hdr;
3713 int len = skb->len;
3715 skb_push(skb, HCI_ACL_HDR_SIZE);
3716 skb_reset_transport_header(skb);
3717 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3718 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3719 hdr->dlen = cpu_to_le16(len);
3722 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3723 struct sk_buff *skb, __u16 flags)
3725 struct hci_conn *conn = chan->conn;
3726 struct hci_dev *hdev = conn->hdev;
3727 struct sk_buff *list;
3729 skb->len = skb_headlen(skb);
3730 skb->data_len = 0;
3732 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3734 switch (hdev->dev_type) {
3735 case HCI_BREDR:
3736 hci_add_acl_hdr(skb, conn->handle, flags);
3737 break;
3738 case HCI_AMP:
3739 hci_add_acl_hdr(skb, chan->handle, flags);
3740 break;
3741 default:
3742 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3743 return;
3746 list = skb_shinfo(skb)->frag_list;
3747 if (!list) {
3748 /* Non fragmented */
3749 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3751 skb_queue_tail(queue, skb);
3752 } else {
3753 /* Fragmented */
3754 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3756 skb_shinfo(skb)->frag_list = NULL;
3758 /* Queue all fragments atomically. We need to use spin_lock_bh
3759 * here because of 6LoWPAN links, as there this function is
3760 * called from softirq and using normal spin lock could cause
3761 * deadlocks.
3763 spin_lock_bh(&queue->lock);
3765 __skb_queue_tail(queue, skb);
3767 flags &= ~ACL_START;
3768 flags |= ACL_CONT;
3769 do {
3770 skb = list; list = list->next;
3772 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3773 hci_add_acl_hdr(skb, conn->handle, flags);
3775 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3777 __skb_queue_tail(queue, skb);
3778 } while (list);
3780 spin_unlock_bh(&queue->lock);
3784 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3786 struct hci_dev *hdev = chan->conn->hdev;
3788 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3790 hci_queue_acl(chan, &chan->data_q, skb, flags);
3792 queue_work(hdev->workqueue, &hdev->tx_work);
3795 /* Send SCO data */
3796 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3798 struct hci_dev *hdev = conn->hdev;
3799 struct hci_sco_hdr hdr;
3801 BT_DBG("%s len %d", hdev->name, skb->len);
3803 hdr.handle = cpu_to_le16(conn->handle);
3804 hdr.dlen = skb->len;
3806 skb_push(skb, HCI_SCO_HDR_SIZE);
3807 skb_reset_transport_header(skb);
3808 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3810 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3812 skb_queue_tail(&conn->data_q, skb);
3813 queue_work(hdev->workqueue, &hdev->tx_work);
3816 /* ---- HCI TX task (outgoing data) ---- */
3818 /* HCI Connection scheduler */
3819 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3820 int *quote)
3822 struct hci_conn_hash *h = &hdev->conn_hash;
3823 struct hci_conn *conn = NULL, *c;
3824 unsigned int num = 0, min = ~0;
3826 /* We don't have to lock device here. Connections are always
3827 * added and removed with TX task disabled. */
3829 rcu_read_lock();
3831 list_for_each_entry_rcu(c, &h->list, list) {
3832 if (c->type != type || skb_queue_empty(&c->data_q))
3833 continue;
3835 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3836 continue;
3838 num++;
3840 if (c->sent < min) {
3841 min = c->sent;
3842 conn = c;
3845 if (hci_conn_num(hdev, type) == num)
3846 break;
3849 rcu_read_unlock();
3851 if (conn) {
3852 int cnt, q;
3854 switch (conn->type) {
3855 case ACL_LINK:
3856 cnt = hdev->acl_cnt;
3857 break;
3858 case SCO_LINK:
3859 case ESCO_LINK:
3860 cnt = hdev->sco_cnt;
3861 break;
3862 case LE_LINK:
3863 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3864 break;
3865 default:
3866 cnt = 0;
3867 BT_ERR("Unknown link type");
3870 q = cnt / num;
3871 *quote = q ? q : 1;
3872 } else
3873 *quote = 0;
3875 BT_DBG("conn %p quote %d", conn, *quote);
3876 return conn;
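/* Example of the quota above: with hdev->acl_cnt == 8 and three ACL
 * connections that have queued data, the connection with the fewest
 * packets in flight is picked and gets a quote of 8 / 3 == 2; when
 * there are more connections than free slots the quote never drops
 * below 1.
 */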
3879 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3881 struct hci_conn_hash *h = &hdev->conn_hash;
3882 struct hci_conn *c;
3884 BT_ERR("%s link tx timeout", hdev->name);
3886 rcu_read_lock();
3888 /* Kill stalled connections */
3889 list_for_each_entry_rcu(c, &h->list, list) {
3890 if (c->type == type && c->sent) {
3891 BT_ERR("%s killing stalled connection %pMR",
3892 hdev->name, &c->dst);
3893 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3897 rcu_read_unlock();
3900 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3901 int *quote)
3903 struct hci_conn_hash *h = &hdev->conn_hash;
3904 struct hci_chan *chan = NULL;
3905 unsigned int num = 0, min = ~0, cur_prio = 0;
3906 struct hci_conn *conn;
3907 int cnt, q, conn_num = 0;
3909 BT_DBG("%s", hdev->name);
3911 rcu_read_lock();
3913 list_for_each_entry_rcu(conn, &h->list, list) {
3914 struct hci_chan *tmp;
3916 if (conn->type != type)
3917 continue;
3919 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3920 continue;
3922 conn_num++;
3924 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3925 struct sk_buff *skb;
3927 if (skb_queue_empty(&tmp->data_q))
3928 continue;
3930 skb = skb_peek(&tmp->data_q);
3931 if (skb->priority < cur_prio)
3932 continue;
3934 if (skb->priority > cur_prio) {
3935 num = 0;
3936 min = ~0;
3937 cur_prio = skb->priority;
3940 num++;
3942 if (conn->sent < min) {
3943 min = conn->sent;
3944 chan = tmp;
3948 if (hci_conn_num(hdev, type) == conn_num)
3949 break;
3952 rcu_read_unlock();
3954 if (!chan)
3955 return NULL;
3957 switch (chan->conn->type) {
3958 case ACL_LINK:
3959 cnt = hdev->acl_cnt;
3960 break;
3961 case AMP_LINK:
3962 cnt = hdev->block_cnt;
3963 break;
3964 case SCO_LINK:
3965 case ESCO_LINK:
3966 cnt = hdev->sco_cnt;
3967 break;
3968 case LE_LINK:
3969 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3970 break;
3971 default:
3972 cnt = 0;
3973 BT_ERR("Unknown link type");
3976 q = cnt / num;
3977 *quote = q ? q : 1;
3978 BT_DBG("chan %p quote %d", chan, *quote);
3979 return chan;
3982 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3984 struct hci_conn_hash *h = &hdev->conn_hash;
3985 struct hci_conn *conn;
3986 int num = 0;
3988 BT_DBG("%s", hdev->name);
3990 rcu_read_lock();
3992 list_for_each_entry_rcu(conn, &h->list, list) {
3993 struct hci_chan *chan;
3995 if (conn->type != type)
3996 continue;
3998 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3999 continue;
4001 num++;
4003 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4004 struct sk_buff *skb;
4006 if (chan->sent) {
4007 chan->sent = 0;
4008 continue;
4011 if (skb_queue_empty(&chan->data_q))
4012 continue;
4014 skb = skb_peek(&chan->data_q);
4015 if (skb->priority >= HCI_PRIO_MAX - 1)
4016 continue;
4018 skb->priority = HCI_PRIO_MAX - 1;
4020 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4021 skb->priority);
4024 if (hci_conn_num(hdev, type) == num)
4025 break;
4028 rcu_read_unlock();
4032 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4034 /* Calculate count of blocks used by this packet */
4035 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
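/* Example: with hdev->block_len == 64, a 339-byte ACL packet (4-byte
 * ACL header plus 335 bytes of payload) needs DIV_ROUND_UP(335, 64) == 6
 * controller buffer blocks.
 */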
4038 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4040 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4041 /* ACL tx timeout must be longer than maximum
4042 * link supervision timeout (40.9 seconds) */
4043 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4044 HCI_ACL_TX_TIMEOUT))
4045 hci_link_tx_to(hdev, ACL_LINK);
4049 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4051 unsigned int cnt = hdev->acl_cnt;
4052 struct hci_chan *chan;
4053 struct sk_buff *skb;
4054 int quote;
4056 __check_timeout(hdev, cnt);
4058 while (hdev->acl_cnt &&
4059 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4060 u32 priority = (skb_peek(&chan->data_q))->priority;
4061 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4062 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4063 skb->len, skb->priority);
4065 /* Stop if priority has changed */
4066 if (skb->priority < priority)
4067 break;
4069 skb = skb_dequeue(&chan->data_q);
4071 hci_conn_enter_active_mode(chan->conn,
4072 bt_cb(skb)->force_active);
4074 hci_send_frame(hdev, skb);
4075 hdev->acl_last_tx = jiffies;
4077 hdev->acl_cnt--;
4078 chan->sent++;
4079 chan->conn->sent++;
4083 if (cnt != hdev->acl_cnt)
4084 hci_prio_recalculate(hdev, ACL_LINK);
4087 static void hci_sched_acl_blk(struct hci_dev *hdev)
4089 unsigned int cnt = hdev->block_cnt;
4090 struct hci_chan *chan;
4091 struct sk_buff *skb;
4092 int quote;
4093 u8 type;
4095 __check_timeout(hdev, cnt);
4097 BT_DBG("%s", hdev->name);
4099 if (hdev->dev_type == HCI_AMP)
4100 type = AMP_LINK;
4101 else
4102 type = ACL_LINK;
4104 while (hdev->block_cnt > 0 &&
4105 (chan = hci_chan_sent(hdev, type, &quote))) {
4106 u32 priority = (skb_peek(&chan->data_q))->priority;
4107 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4108 int blocks;
4110 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4111 skb->len, skb->priority);
4113 /* Stop if priority has changed */
4114 if (skb->priority < priority)
4115 break;
4117 skb = skb_dequeue(&chan->data_q);
4119 blocks = __get_blocks(hdev, skb);
4120 if (blocks > hdev->block_cnt)
4121 return;
4123 hci_conn_enter_active_mode(chan->conn,
4124 bt_cb(skb)->force_active);
4126 hci_send_frame(hdev, skb);
4127 hdev->acl_last_tx = jiffies;
4129 hdev->block_cnt -= blocks;
4130 quote -= blocks;
4132 chan->sent += blocks;
4133 chan->conn->sent += blocks;
4137 if (cnt != hdev->block_cnt)
4138 hci_prio_recalculate(hdev, type);
4141 static void hci_sched_acl(struct hci_dev *hdev)
4143 BT_DBG("%s", hdev->name);
4145 /* No ACL link over BR/EDR controller */
4146 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4147 return;
4149 /* No AMP link over AMP controller */
4150 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4151 return;
4153 switch (hdev->flow_ctl_mode) {
4154 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4155 hci_sched_acl_pkt(hdev);
4156 break;
4158 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4159 hci_sched_acl_blk(hdev);
4160 break;
4164 /* Schedule SCO */
4165 static void hci_sched_sco(struct hci_dev *hdev)
4167 struct hci_conn *conn;
4168 struct sk_buff *skb;
4169 int quote;
4171 BT_DBG("%s", hdev->name);
4173 if (!hci_conn_num(hdev, SCO_LINK))
4174 return;
4176 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4177 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4178 BT_DBG("skb %p len %d", skb, skb->len);
4179 hci_send_frame(hdev, skb);
4181 conn->sent++;
4182 if (conn->sent == ~0)
4183 conn->sent = 0;
4188 static void hci_sched_esco(struct hci_dev *hdev)
4190 struct hci_conn *conn;
4191 struct sk_buff *skb;
4192 int quote;
4194 BT_DBG("%s", hdev->name);
4196 if (!hci_conn_num(hdev, ESCO_LINK))
4197 return;
4199 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4200 &quote))) {
4201 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4202 BT_DBG("skb %p len %d", skb, skb->len);
4203 hci_send_frame(hdev, skb);
4205 conn->sent++;
4206 if (conn->sent == ~0)
4207 conn->sent = 0;
4212 static void hci_sched_le(struct hci_dev *hdev)
4214 struct hci_chan *chan;
4215 struct sk_buff *skb;
4216 int quote, cnt, tmp;
4218 BT_DBG("%s", hdev->name);
4220 if (!hci_conn_num(hdev, LE_LINK))
4221 return;
4223 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4224 /* LE tx timeout must be longer than maximum
4225 * link supervision timeout (40.9 seconds) */
4226 if (!hdev->le_cnt && hdev->le_pkts &&
4227 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4228 hci_link_tx_to(hdev, LE_LINK);
4231 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4232 tmp = cnt;
4233 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4234 u32 priority = (skb_peek(&chan->data_q))->priority;
4235 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4236 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4237 skb->len, skb->priority);
4239 /* Stop if priority has changed */
4240 if (skb->priority < priority)
4241 break;
4243 skb = skb_dequeue(&chan->data_q);
4245 hci_send_frame(hdev, skb);
4246 hdev->le_last_tx = jiffies;
4248 cnt--;
4249 chan->sent++;
4250 chan->conn->sent++;
4254 if (hdev->le_pkts)
4255 hdev->le_cnt = cnt;
4256 else
4257 hdev->acl_cnt = cnt;
4259 if (cnt != tmp)
4260 hci_prio_recalculate(hdev, LE_LINK);
4263 static void hci_tx_work(struct work_struct *work)
4265 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4266 struct sk_buff *skb;
4268 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4269 hdev->sco_cnt, hdev->le_cnt);
4271 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4272 /* Schedule queues and send stuff to HCI driver */
4273 hci_sched_acl(hdev);
4274 hci_sched_sco(hdev);
4275 hci_sched_esco(hdev);
4276 hci_sched_le(hdev);
4279 /* Send next queued raw (unknown type) packet */
4280 while ((skb = skb_dequeue(&hdev->raw_q)))
4281 hci_send_frame(hdev, skb);
4284 /* ----- HCI RX task (incoming data processing) ----- */
4286 /* ACL data packet */
4287 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4289 struct hci_acl_hdr *hdr = (void *) skb->data;
4290 struct hci_conn *conn;
4291 __u16 handle, flags;
4293 skb_pull(skb, HCI_ACL_HDR_SIZE);
4295 handle = __le16_to_cpu(hdr->handle);
4296 flags = hci_flags(handle);
4297 handle = hci_handle(handle);
4299 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4300 handle, flags);
4302 hdev->stat.acl_rx++;
4304 hci_dev_lock(hdev);
4305 conn = hci_conn_hash_lookup_handle(hdev, handle);
4306 hci_dev_unlock(hdev);
4308 if (conn) {
4309 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4311 /* Send to upper protocol */
4312 l2cap_recv_acldata(conn, skb, flags);
4313 return;
4314 } else {
4315 BT_ERR("%s ACL packet for unknown connection handle %d",
4316 hdev->name, handle);
4319 kfree_skb(skb);
4322 /* SCO data packet */
4323 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4325 struct hci_sco_hdr *hdr = (void *) skb->data;
4326 struct hci_conn *conn;
4327 __u16 handle;
4329 skb_pull(skb, HCI_SCO_HDR_SIZE);
4331 handle = __le16_to_cpu(hdr->handle);
4333 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4335 hdev->stat.sco_rx++;
4337 hci_dev_lock(hdev);
4338 conn = hci_conn_hash_lookup_handle(hdev, handle);
4339 hci_dev_unlock(hdev);
4341 if (conn) {
4342 /* Send to upper protocol */
4343 sco_recv_scodata(conn, skb);
4344 return;
4345 } else {
4346 BT_ERR("%s SCO packet for unknown connection handle %d",
4347 hdev->name, handle);
4350 kfree_skb(skb);
4353 static bool hci_req_is_complete(struct hci_dev *hdev)
4355 struct sk_buff *skb;
4357 skb = skb_peek(&hdev->cmd_q);
4358 if (!skb)
4359 return true;
4361 return bt_cb(skb)->hci.req_start;
4364 static void hci_resend_last(struct hci_dev *hdev)
4366 struct hci_command_hdr *sent;
4367 struct sk_buff *skb;
4368 u16 opcode;
4370 if (!hdev->sent_cmd)
4371 return;
4373 sent = (void *) hdev->sent_cmd->data;
4374 opcode = __le16_to_cpu(sent->opcode);
4375 if (opcode == HCI_OP_RESET)
4376 return;
4378 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4379 if (!skb)
4380 return;
4382 skb_queue_head(&hdev->cmd_q, skb);
4383 queue_work(hdev->workqueue, &hdev->cmd_work);
4386 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4387 hci_req_complete_t *req_complete,
4388 hci_req_complete_skb_t *req_complete_skb)
4390 struct sk_buff *skb;
4391 unsigned long flags;
4393 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4395 /* If the completed command doesn't match the last one that was
4396 * sent we need to do special handling of it.
4398 if (!hci_sent_cmd_data(hdev, opcode)) {
4399 /* Some CSR based controllers generate a spontaneous
4400 * reset complete event during init and any pending
4401 * command will never be completed. In such a case we
4402 * need to resend whatever was the last sent
4403 * command.
4405 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4406 hci_resend_last(hdev);
4408 return;
4411 /* If the command succeeded and there's still more commands in
4412 * this request the request is not yet complete.
4414 if (!status && !hci_req_is_complete(hdev))
4415 return;
4417 /* If this was the last command in a request the complete
4418 * callback would be found in hdev->sent_cmd instead of the
4419 * command queue (hdev->cmd_q).
4421 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4422 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4423 return;
4426 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4427 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4428 return;
4431 /* Remove all pending commands belonging to this request */
4432 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4433 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4434 if (bt_cb(skb)->hci.req_start) {
4435 __skb_queue_head(&hdev->cmd_q, skb);
4436 break;
4439 *req_complete = bt_cb(skb)->hci.req_complete;
4440 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4441 kfree_skb(skb);
4443 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4446 static void hci_rx_work(struct work_struct *work)
4448 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4449 struct sk_buff *skb;
4451 BT_DBG("%s", hdev->name);
4453 while ((skb = skb_dequeue(&hdev->rx_q))) {
4454 /* Send copy to monitor */
4455 hci_send_to_monitor(hdev, skb);
4457 if (atomic_read(&hdev->promisc)) {
4458 /* Send copy to the sockets */
4459 hci_send_to_sock(hdev, skb);
4462 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4463 kfree_skb(skb);
4464 continue;
4467 if (test_bit(HCI_INIT, &hdev->flags)) {
4468 /* Don't process data packets in this state. */
4469 switch (bt_cb(skb)->pkt_type) {
4470 case HCI_ACLDATA_PKT:
4471 case HCI_SCODATA_PKT:
4472 kfree_skb(skb);
4473 continue;
4477 /* Process frame */
4478 switch (bt_cb(skb)->pkt_type) {
4479 case HCI_EVENT_PKT:
4480 BT_DBG("%s Event packet", hdev->name);
4481 hci_event_packet(hdev, skb);
4482 break;
4484 case HCI_ACLDATA_PKT:
4485 BT_DBG("%s ACL data packet", hdev->name);
4486 hci_acldata_packet(hdev, skb);
4487 break;
4489 case HCI_SCODATA_PKT:
4490 BT_DBG("%s SCO data packet", hdev->name);
4491 hci_scodata_packet(hdev, skb);
4492 break;
4494 default:
4495 kfree_skb(skb);
4496 break;
4501 static void hci_cmd_work(struct work_struct *work)
4503 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4504 struct sk_buff *skb;
4506 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4507 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4509 /* Send queued commands */
4510 if (atomic_read(&hdev->cmd_cnt)) {
4511 skb = skb_dequeue(&hdev->cmd_q);
4512 if (!skb)
4513 return;
4515 kfree_skb(hdev->sent_cmd);
4517 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4518 if (hdev->sent_cmd) {
4519 atomic_dec(&hdev->cmd_cnt);
4520 hci_send_frame(hdev, skb);
4521 if (test_bit(HCI_RESET, &hdev->flags))
4522 cancel_delayed_work(&hdev->cmd_timer);
4523 else
4524 schedule_delayed_work(&hdev->cmd_timer,
4525 HCI_CMD_TIMEOUT);
4526 } else {
4527 skb_queue_head(&hdev->cmd_q, skb);
4528 queue_work(hdev->workqueue, &hdev->cmd_work);