/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI debugfs entries ---- */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}
static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
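/* With debugfs mounted in its usual location, the file created from these
 * operations is typically reachable as
 * /sys/kernel/debug/bluetooth/hci0/dut_mode; writing 'Y' or 'N' toggles
 * Device Under Test mode through the strtobool() parsing above.
 */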
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}
static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);

        return 0;
}
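/* The HCI_RESET bit set here guards against queueing further resets; it is
 * cleared again by the event processing code once the controller answers the
 * HCI_OP_RESET command with a Command Complete event.
 */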
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
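/* The hdev->commands[] array mirrors the Supported Commands bit mask returned
 * by Read Local Supported Commands: octet N, bit B of the mask ends up as
 * hdev->commands[N] & (1 << B). The commands[14] & 0x20 test above therefore
 * checks octet 14, bit 5, which the Bluetooth core specification assigns to
 * Read Local Supported Features.
 */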
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}
static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
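/* In the event mask, bit N (counting from bit 0 of events[0]) enables the
 * event with code N + 1. Disconnection Complete has event code 0x05, for
 * example, which is why the LE-only branch above sets events[0] |= 0x10
 * (bit 4) for it.
 */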
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = hdev->le_max_tx_len;
                cp.tx_time = hdev->le_max_tx_time;
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                /* No transmitter PHY or receiver PHY preferences */
                cp.all_phys = 0x03;
                cp.tx_phys = 0;
                cp.rx_phys = 0;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}
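/* Bring-up runs as a pipeline of synchronous request stages: init1 resets
 * the controller and reads the basic identity information, init2 performs
 * the BR/EDR or LE specific setup, init3 programs the event masks and LE
 * settings, and init4 enables the optional features whose commands the
 * controller advertised. __hci_init() below simply chains these stages and
 * stops at the first failure.
 */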
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}
static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}
static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}
static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}
static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
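/* Note that hci_dev_get() returns the device with its reference count raised
 * via hci_dev_hold(); every successful lookup must therefore be balanced with
 * a matching hci_dev_put() by the caller.
 */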
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
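/* The resolve list is kept ordered by ascending RSSI magnitude, so entries
 * whose signal is strongest (smallest abs(rssi)) stay at the front and get
 * their remote names resolved first.
 */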
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}
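/* The returned flags word is consumed by the mgmt layer when emitting Device
 * Found events: MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm the
 * remote name, and MGMT_DEV_FOUND_LEGACY_PAIRING marks devices that do not
 * support Simple Pairing.
 */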
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);

        return copied;
}
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
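/* The HCIINQUIRY ioctl ABI implied here: userspace passes a buffer that
 * starts with a struct hci_inquiry_req, and on return the request header is
 * followed by up to num_rsp struct inquiry_info records, exactly as laid out
 * by the two copy_to_user() calls above.
 */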
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_sync_unlock(hdev);
        return ret;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}
int hci_dev_do_close(struct hci_dev *hdev)
{
        bool auto_off;

        BT_DBG("%s %p", hdev->name, hdev);

        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            test_bit(HCI_UP, &hdev->flags)) {
                /* Execute vendor specific shutdown routine */
                if (hdev->shutdown)
                        hdev->shutdown(hdev);
        }

        cancel_delayed_work(&hdev->power_off);

        hci_request_cancel_all(hdev);
        hci_req_sync_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                hci_req_sync_unlock(hdev);
                return 0;
        }

        hci_leds_update_powered(hdev, false);

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                hdev->discov_timeout = 0;
                hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                cancel_delayed_work(&hdev->service_cache);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                cancel_delayed_work_sync(&hdev->rpa_expired);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);

        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

        auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

        if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_MGMT))
                __mgmt_power_off(hdev);

        hci_inquiry_cache_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        smp_unregister(hdev);

        hci_sock_dev_event(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
            !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        clear_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags &= BIT(HCI_RAW);
        hci_dev_clear_volatile_flags(hdev);

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
        bacpy(&hdev->random_addr, BDADDR_ANY);

        hci_req_sync_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

        hci_req_sync_unlock(hdev);
        return ret;
}
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err < 0)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}
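/* These HCISET* ioctls form the legacy (pre-mgmt) configuration path; they
 * are what tools in the style of hciconfig use, e.g. "hciconfig hci0 piscan"
 * maps to HCISETSCAN with dev_opt set to SCAN_PAGE | SCAN_INQUIRY.
 */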
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}
static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                hci_req_sync_lock(hdev);
                err = __hci_req_hci_power_on(hdev);
                hci_req_sync_unlock(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}
static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}
static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (hci_dev_do_close(hdev))
                return;

        hci_dev_do_open(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}
void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key;

        list_for_each_entry_rcu(key, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}
void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k;

        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();

        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();
                        return k;
                }
        }

        rcu_read_unlock();

        return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}
static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();
                        return k;
                }
        }

        rcu_read_unlock();

        return NULL;
}
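/* For legacy SMP pairing each side distributes its own LTK, so a stored key
 * only matches connections where the local device has the stored role;
 * Secure Connections generates a single symmetric LTK, which is why
 * smp_ltk_is_sc() keys match regardless of role.
 */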
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk;

        rcu_read_lock();

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        rcu_read_unlock();
                        return irk;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        rcu_read_unlock();
                        return irk;
                }
        }

        rcu_read_unlock();

        return NULL;
}
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2312 struct smp_irk
*irk
;
2314 /* Identity Address must be public or static random */
2315 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
2319 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2320 if (addr_type
== irk
->addr_type
&&
2321 bacmp(bdaddr
, &irk
->bdaddr
) == 0) {
2331 struct link_key
*hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2332 bdaddr_t
*bdaddr
, u8
*val
, u8 type
,
2333 u8 pin_len
, bool *persistent
)
2335 struct link_key
*key
, *old_key
;
2338 old_key
= hci_find_link_key(hdev
, bdaddr
);
2340 old_key_type
= old_key
->type
;
2343 old_key_type
= conn
? conn
->key_type
: 0xff;
2344 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2347 list_add_rcu(&key
->list
, &hdev
->link_keys
);
2350 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
2352 /* Some buggy controller combinations generate a changed
2353 * combination key for legacy pairing even when there's no
2355 if (type
== HCI_LK_CHANGED_COMBINATION
&&
2356 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
2357 type
= HCI_LK_COMBINATION
;
2359 conn
->key_type
= type
;
2362 bacpy(&key
->bdaddr
, bdaddr
);
2363 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
2364 key
->pin_len
= pin_len
;
2366 if (type
== HCI_LK_CHANGED_COMBINATION
)
2367 key
->type
= old_key_type
;
2372 *persistent
= hci_persistent_key(hdev
, conn
, type
,
2378 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2379 u8 addr_type
, u8 type
, u8 authenticated
,
2380 u8 tk
[16], u8 enc_size
, __le16 ediv
, __le64 rand
)
2382 struct smp_ltk
*key
, *old_key
;
2383 u8 role
= ltk_role(type
);
2385 old_key
= hci_find_ltk(hdev
, bdaddr
, addr_type
, role
);
2389 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2392 list_add_rcu(&key
->list
, &hdev
->long_term_keys
);
2395 bacpy(&key
->bdaddr
, bdaddr
);
2396 key
->bdaddr_type
= addr_type
;
2397 memcpy(key
->val
, tk
, sizeof(key
->val
));
2398 key
->authenticated
= authenticated
;
2401 key
->enc_size
= enc_size
;
2407 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2408 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
2410 struct smp_irk
*irk
;
2412 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
2414 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
2418 bacpy(&irk
->bdaddr
, bdaddr
);
2419 irk
->addr_type
= addr_type
;
2421 list_add_rcu(&irk
->list
, &hdev
->identity_resolving_keys
);
2424 memcpy(irk
->val
, val
, 16);
2425 bacpy(&irk
->rpa
, rpa
);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}
2446 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2451 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2452 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
2455 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2457 list_del_rcu(&k
->list
);
2462 return removed
? 0 : -ENOENT
;
2465 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
2469 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2470 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
2473 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2475 list_del_rcu(&k
->list
);
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %dMR", hdev->name, instance);

	return 0;
}
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}