/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);

	return 0;
}
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh
					    * Complete
					    */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If the controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		/* If the controller supports LE Set Extended Scan Parameters
		 * and LE Set Extended Scan Enable commands, enable the
		 * corresponding event.
		 */
		if (use_ext_scan(hdev))
			events[1] |= 0x10;	/* LE Extended Advertising
						 * Report
						 */

		/* If the controller supports the LE Extended Create Connection
		 * command, enable the corresponding event.
		 */
		if (use_ext_conn(hdev))
			events[1] |= 0x02;	/* LE Enhanced Connection
						 * Complete
						 */

		/* If the controller supports the LE Extended Advertising
		 * command, enable the corresponding event.
		 */
		if (ext_adv_capable(hdev))
			events[2] |= 0x02;	/* LE Advertising Set
						 * Terminated
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		/* Read LE Advertising Channel TX Power */
		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
			/* HCI TS spec forbids mixing of legacy and extended
			 * advertising commands wherein READ_ADV_TX_POWER is
			 * also included. So do not call it if extended adv
			 * is supported otherwise controller will return
			 * COMMAND_DISALLOWED for extended commands.
			 */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->commands[34] & 0x40) {
			/* Read LE Resolving List Size */
			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[34] & 0x20) {
			/* Clear LE Resolving List */
			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		if (ext_adv_capable(hdev)) {
			/* Read LE Number of Supported Advertising Sets */
			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				    0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		cp.all_phys = 0x00;
		cp.tx_phys = hdev->le_tx_def_phys;
		cp.rx_phys = hdev->le_rx_def_phys;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}
static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}
static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}
static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
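/* The numeric auth_type/remote_auth comparisons above follow the HCI
 * authentication requirements encoding: 0x00/0x01 no bonding, 0x02/0x03
 * dedicated bonding, 0x04/0x05 general bonding, where the odd values
 * additionally require MITM protection.
 */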
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

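/* The data->present bitmask set above encodes which OOB value pairs are
 * valid: 0x01 means P-192 only, 0x02 means P-256 only, 0x03 means both,
 * and 0x00 means neither. For example, a Secure Connections-only pairing
 * that supplies just hash256/rand256 leaves the hash192/rand192 buffers
 * zeroed and ends up with present == 0x02.
 */
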
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

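/* hci_get_next_instance() treats hdev->adv_instances as a circular list:
 * with instances 1, 2 and 3 registered, asking for the successor of 3
 * wraps around and yields 1 again, which is how the advertising code can
 * rotate through multiple active instances.
 */
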
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	adv_instance->tx_power = HCI_TX_POWER_INVALID;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);

	BT_DBG("%s for %d", hdev->name, instance);

	return 0;
}

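/* Illustrative call sequence (a sketch, not part of the original file):
 * registering a one-byte Flags AD field as instance 1 with no scan
 * response, no timeout and the default duration would look like:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	// len, type (Flags), value
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0);
 *	hci_dev_unlock(hdev);
 *
 * Passing duration == 0 selects HCI_DEFAULT_ADV_DURATION, and a second
 * call with the same instance number updates the entry in place.
 */
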
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

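/* Illustrative sketch (not in the original source): the list helpers
 * return distinct errors that callers can report upward unchanged, e.g.
 * when adding a device to the LE white list maintained in this file:
 *
 *	err = hci_bdaddr_list_add(&hdev->le_white_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	// -EBADF  : BDADDR_ANY was passed
 *	// -EEXIST : address/type pair already present
 *	// -ENOMEM : allocation failure
 */
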
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

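/* Example of the selection rule above (illustrative): a dual-mode
 * controller with a public BD_ADDR, BR/EDR still enabled and no
 * HCI_FORCE_STATIC_ADDR set reports its public address with
 * ADDR_LE_DEV_PUBLIC; the same controller with BR/EDR disabled and a
 * configured static address reports the static address with
 * ADDR_LE_DEV_RANDOM instead.
 */
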
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

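/* Minimal driver-side sketch of the registration contract checked above
 * (hypothetical driver, for illustration only; the my_* names are not
 * part of this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;	// all three callbacks are mandatory:
 *	hdev->close = my_close;	// hci_register_dev() returns -EINVAL
 *	hdev->send  = my_send;	// if any of them is missing
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
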
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

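/* The three bytes injected above form a complete HCI event packet:
 * { HCI_EV_HARDWARE_ERROR, plen = 0x01, code = 0x00 }. Feeding it through
 * hci_recv_frame() makes the synthetic hardware error indistinguishable
 * from one reported by the controller, so the normal error handling path
 * takes over and resets the device.
 */
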
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);

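/* Illustrative use (a sketch, not from the original file): the opcode
 * must live in the vendor OGF (0x3f), e.g. a hypothetical fire-and-forget
 * vendor command with a two-byte payload:
 *
 *	u8 param[] = { 0x01, 0x02 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 *	// any non-vendor opcode is rejected with -EINVAL
 */
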
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);

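/* Illustrative sketch (not part of the original source): reading the
 * local version information synchronously and consuming the returned
 * event payload:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data points at struct hci_rp_read_local_version
 *	kfree_skb(skb);
 */
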
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

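/* Worked example of the block arithmetic (illustrative): with
 * hdev->block_len == 64, a 339-byte ACL frame carries 339 - 4 = 335
 * bytes of payload after the 4-byte ACL header, and
 * DIV_ROUND_UP(335, 64) == 6 controller buffer blocks.
 */
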
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}