/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
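/* Added note: hci_notify() only forwards the HCI_DEV_* event to the HCI
 * socket layer via hci_sock_dev_event(), which is how bound HCI sockets are
 * told about device state changes.
 */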
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
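/* A quick usage sketch (the path shown is only illustrative): once the
 * controller is up, writing to <debugfs>/bluetooth/hciX/dut_mode toggles
 * Device Under Test mode, e.g. "echo 1 > dut_mode" issues
 * HCI_OP_ENABLE_DUT_MODE and "echo 0 > dut_mode" issues HCI_OP_RESET, while
 * reading the file reports 'Y' or 'N' as implemented above.
 */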
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
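/* A minimal caller sketch (not part of this file; the function name below is
 * hypothetical): a driver or init path holding a powered hdev could use
 * __hci_cmd_sync() to run one command synchronously and parse the reply
 * parameters from the returned skb, much like dut_mode_write() does above.
 */
#if 0	/* illustrative only */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	/* Send Read Local Version Information and wait for the reply */
	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The reply parameters start at skb->data */
	rp = (struct hci_rp_read_local_version *)skb->data;
	BT_DBG("%s HCI ver %u rev %u", hdev->name, rp->hci_ver,
	       __le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}
#endif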
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
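/* Note on the locking contract above: __hci_req_sync() expects the caller to
 * already hold hdev->req_lock, while hci_req_sync() takes it itself via
 * hci_req_lock()/hci_req_unlock(), so synchronous requests are serialized
 * per device.
 */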
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage of init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
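/* Worked out: the Connection Accept Timeout written above is 0x7d00 = 32000
 * baseband slots, and one slot is 0.625 ms, so 32000 * 0.625 ms = 20 s,
 * matching the "~20 secs" note.
 */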
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
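/* Reading the mask above: events[] is the 8-octet Set Event Mask parameter,
 * so octet N bit B is enabled with events[N] |= (1 << B). For example,
 * "Disconnection Complete" is bit 4 of octet 0, hence events[0] |= 0x10 in
 * the LE-only branch.
 */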
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
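/* Reading the checks above: hdev->commands[] mirrors the Supported Commands
 * bitmask returned by Read Local Supported Commands, so a test such as
 * (hdev->commands[6] & 0x80) probes bit 7 of octet 6, which this code
 * associates with the Delete Stored Link Key command.
 */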
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
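/* Summary of the staged bring-up above: hci_init1_req resets the controller
 * and reads basic information, hci_init2_req configures BR/EDR and LE basics,
 * and hci_init3_req/hci_init4_req run only for HCI_BREDR-type controllers;
 * AMP controllers stop after the first two stages, as checked above.
 */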
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
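/* Usage note: a successful hci_dev_get() takes a reference on the device via
 * hci_dev_hold(), so callers are expected to drop it with hci_dev_put() once
 * they are done with the device.
 */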
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
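/* The discovery cache keeps each entry on up to three lists: "all" for every
 * cached device, "unknown" for devices whose name is not yet known and
 * "resolve" for entries queued for name resolution; the lookup helpers below
 * each walk one of these lists.
 */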
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
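/* A note on units (per the Bluetooth core specification): the Inquiry_Length
 * value copied into cp.length above is expressed in units of 1.28 seconds,
 * which is why hci_inquiry() below budgets roughly 2 seconds of wait time
 * per unit when it computes its own timeout.
 */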
1243 int hci_inquiry(void __user
*arg
)
1245 __u8 __user
*ptr
= arg
;
1246 struct hci_inquiry_req ir
;
1247 struct hci_dev
*hdev
;
1248 int err
= 0, do_inquiry
= 0, max_rsp
;
1252 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
1255 hdev
= hci_dev_get(ir
.dev_id
);
1259 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1264 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1269 if (hdev
->dev_type
!= HCI_BREDR
) {
1274 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1280 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
1281 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
1282 hci_inquiry_cache_flush(hdev
);
1285 hci_dev_unlock(hdev
);
1287 timeo
= ir
.length
* msecs_to_jiffies(2000);
1290 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
1295 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1296 * cleared). If it is interrupted by a signal, return -EINTR.
1298 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
,
1299 TASK_INTERRUPTIBLE
))
1303 /* for unlimited number of responses we will use buffer with
1306 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
1308 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1309 * copy it to the user space.
1311 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
1318 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
1319 hci_dev_unlock(hdev
);
1321 BT_DBG("num_rsp %d", ir
.num_rsp
);
1323 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
1325 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
1338 static int hci_dev_do_open(struct hci_dev
*hdev
)
1342 BT_DBG("%s %p", hdev
->name
, hdev
);
1346 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
)) {
1351 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
1352 !hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
1353 /* Check for rfkill but allow the HCI setup stage to
1354 * proceed (which in itself doesn't cause any RF activity).
1356 if (hci_dev_test_flag(hdev
, HCI_RFKILLED
)) {
1361 /* Check for valid public address or a configured static
1362 * random adddress, but let the HCI setup proceed to
1363 * be able to determine if there is a public address
1366 * In case of user channel usage, it is not important
1367 * if a public address or static random address is
1370 * This check is only valid for BR/EDR controllers
1371 * since AMP controllers do not have an address.
1373 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1374 hdev
->dev_type
== HCI_BREDR
&&
1375 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
1376 !bacmp(&hdev
->static_addr
, BDADDR_ANY
)) {
1377 ret
= -EADDRNOTAVAIL
;
1382 if (test_bit(HCI_UP
, &hdev
->flags
)) {
1387 if (hdev
->open(hdev
)) {
1392 atomic_set(&hdev
->cmd_cnt
, 1);
1393 set_bit(HCI_INIT
, &hdev
->flags
);
1395 if (hci_dev_test_flag(hdev
, HCI_SETUP
)) {
1397 ret
= hdev
->setup(hdev
);
1399 /* The transport driver can set these quirks before
1400 * creating the HCI device or in its setup callback.
1402 * In case any of them is set, the controller has to
1403 * start up as unconfigured.
1405 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
1406 test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
))
1407 hci_dev_set_flag(hdev
, HCI_UNCONFIGURED
);
1409 /* For an unconfigured controller it is required to
1410 * read at least the version information provided by
1411 * the Read Local Version Information command.
1413 * If the set_bdaddr driver callback is provided, then
1414 * also the original Bluetooth public device address
1415 * will be read using the Read BD Address command.
1417 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
1418 ret
= __hci_unconf_init(hdev
);
1421 if (hci_dev_test_flag(hdev
, HCI_CONFIG
)) {
1422 /* If public address change is configured, ensure that
1423 * the address gets programmed. If the driver does not
1424 * support changing the public address, fail the power
1427 if (bacmp(&hdev
->public_addr
, BDADDR_ANY
) &&
1429 ret
= hdev
->set_bdaddr(hdev
, &hdev
->public_addr
);
1431 ret
= -EADDRNOTAVAIL
;
1435 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1436 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
1437 ret
= __hci_init(hdev
);
1440 clear_bit(HCI_INIT
, &hdev
->flags
);
1444 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
1445 set_bit(HCI_UP
, &hdev
->flags
);
1446 hci_notify(hdev
, HCI_DEV_UP
);
1447 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
1448 !hci_dev_test_flag(hdev
, HCI_CONFIG
) &&
1449 !hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1450 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1451 hdev
->dev_type
== HCI_BREDR
) {
1453 mgmt_powered(hdev
, 1);
1454 hci_dev_unlock(hdev
);
1457 /* Init failed, cleanup */
1458 flush_work(&hdev
->tx_work
);
1459 flush_work(&hdev
->cmd_work
);
1460 flush_work(&hdev
->rx_work
);
1462 skb_queue_purge(&hdev
->cmd_q
);
1463 skb_queue_purge(&hdev
->rx_q
);
1468 if (hdev
->sent_cmd
) {
1469 kfree_skb(hdev
->sent_cmd
);
1470 hdev
->sent_cmd
= NULL
;
1474 hdev
->flags
&= BIT(HCI_RAW
);
1478 hci_req_unlock(hdev
);
1482 /* ---- HCI ioctl helpers ---- */
1484 int hci_dev_open(__u16 dev
)
1486 struct hci_dev
*hdev
;
1489 hdev
= hci_dev_get(dev
);
1493 /* Devices that are marked as unconfigured can only be powered
1494 * up as user channel. Trying to bring them up as normal devices
1495 * will result into a failure. Only user channel operation is
1498 * When this function is called for a user channel, the flag
1499 * HCI_USER_CHANNEL will be set first before attempting to
1502 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1503 !hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1508 /* We need to ensure that no other power on/off work is pending
1509 * before proceeding to call hci_dev_do_open. This is
1510 * particularly important if the setup procedure has not yet
1513 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
))
1514 cancel_delayed_work(&hdev
->power_off
);
1516 /* After this call it is guaranteed that the setup procedure
1517 * has finished. This means that error conditions like RFKILL
1518 * or no valid public or static random address apply.
1520 flush_workqueue(hdev
->req_workqueue
);
1522 /* For controllers not using the management interface and that
1523 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1524 * so that pairing works for them. Once the management interface
1525 * is in use this bit will be cleared again and userspace has
1526 * to explicitly enable it.
1528 if (!hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
) &&
1529 !hci_dev_test_flag(hdev
, HCI_MGMT
))
1530 hci_dev_set_flag(hdev
, HCI_BONDABLE
);
1532 err
= hci_dev_do_open(hdev
);
1539 /* This function requires the caller holds hdev->lock */
1540 static void hci_pend_le_actions_clear(struct hci_dev
*hdev
)
1542 struct hci_conn_params
*p
;
1544 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
1546 hci_conn_drop(p
->conn
);
1547 hci_conn_put(p
->conn
);
1550 list_del_init(&p
->action
);
1553 BT_DBG("All LE pending actions cleared");
1556 static int hci_dev_do_close(struct hci_dev
*hdev
)
1558 BT_DBG("%s %p", hdev
->name
, hdev
);
1560 if (!hci_dev_test_flag(hdev
, HCI_UNREGISTER
) &&
1561 test_bit(HCI_UP
, &hdev
->flags
)) {
1562 /* Execute vendor specific shutdown routine */
1564 hdev
->shutdown(hdev
);
1567 cancel_delayed_work(&hdev
->power_off
);
1569 hci_req_cancel(hdev
, ENODEV
);
1572 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1573 cancel_delayed_work_sync(&hdev
->cmd_timer
);
1574 hci_req_unlock(hdev
);
1578 /* Flush RX and TX works */
1579 flush_work(&hdev
->tx_work
);
1580 flush_work(&hdev
->rx_work
);
1582 if (hdev
->discov_timeout
> 0) {
1583 cancel_delayed_work(&hdev
->discov_off
);
1584 hdev
->discov_timeout
= 0;
1585 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1586 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1589 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
1590 cancel_delayed_work(&hdev
->service_cache
);
1592 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
1593 cancel_delayed_work_sync(&hdev
->le_scan_restart
);
1595 if (hci_dev_test_flag(hdev
, HCI_MGMT
))
1596 cancel_delayed_work_sync(&hdev
->rpa_expired
);
1598 /* Avoid potential lockdep warnings from the *_flush() calls by
1599 * ensuring the workqueue is empty up front.
1601 drain_workqueue(hdev
->workqueue
);
1605 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
1607 if (!hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
)) {
1608 if (hdev
->dev_type
== HCI_BREDR
)
1609 mgmt_powered(hdev
, 0);
1612 hci_inquiry_cache_flush(hdev
);
1613 hci_pend_le_actions_clear(hdev
);
1614 hci_conn_hash_flush(hdev
);
1615 hci_dev_unlock(hdev
);
1617 smp_unregister(hdev
);
1619 hci_notify(hdev
, HCI_DEV_DOWN
);
1625 skb_queue_purge(&hdev
->cmd_q
);
1626 atomic_set(&hdev
->cmd_cnt
, 1);
1627 if (!hci_dev_test_flag(hdev
, HCI_AUTO_OFF
) &&
1628 !hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) &&
1629 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
1630 set_bit(HCI_INIT
, &hdev
->flags
);
1631 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
1632 clear_bit(HCI_INIT
, &hdev
->flags
);
1635 /* flush cmd work */
1636 flush_work(&hdev
->cmd_work
);
1639 skb_queue_purge(&hdev
->rx_q
);
1640 skb_queue_purge(&hdev
->cmd_q
);
1641 skb_queue_purge(&hdev
->raw_q
);
1643 /* Drop last sent command */
1644 if (hdev
->sent_cmd
) {
1645 cancel_delayed_work_sync(&hdev
->cmd_timer
);
1646 kfree_skb(hdev
->sent_cmd
);
1647 hdev
->sent_cmd
= NULL
;
1650 /* After this point our queues are empty
1651 * and no tasks are scheduled. */
1655 hdev
->flags
&= BIT(HCI_RAW
);
1656 hci_dev_clear_volatile_flags(hdev
);
1658 /* Controller radio is available but is currently powered down */
1659 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
1661 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1662 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
1663 bacpy(&hdev
->random_addr
, BDADDR_ANY
);
1665 hci_req_unlock(hdev
);
1671 int hci_dev_close(__u16 dev
)
1673 struct hci_dev
*hdev
;
1676 hdev
= hci_dev_get(dev
);
1680 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1685 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
))
1686 cancel_delayed_work(&hdev
->power_off
);
1688 err
= hci_dev_do_close(hdev
);
1695 static int hci_dev_do_reset(struct hci_dev
*hdev
)
1699 BT_DBG("%s %p", hdev
->name
, hdev
);
1704 skb_queue_purge(&hdev
->rx_q
);
1705 skb_queue_purge(&hdev
->cmd_q
);
1707 /* Avoid potential lockdep warnings from the *_flush() calls by
1708 * ensuring the workqueue is empty up front.
1710 drain_workqueue(hdev
->workqueue
);
1713 hci_inquiry_cache_flush(hdev
);
1714 hci_conn_hash_flush(hdev
);
1715 hci_dev_unlock(hdev
);
1720 atomic_set(&hdev
->cmd_cnt
, 1);
1721 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
1723 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
1725 hci_req_unlock(hdev
);
1729 int hci_dev_reset(__u16 dev
)
1731 struct hci_dev
*hdev
;
1734 hdev
= hci_dev_get(dev
);
1738 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
1743 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1748 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1753 err
= hci_dev_do_reset(hdev
);
1760 int hci_dev_reset_stat(__u16 dev
)
1762 struct hci_dev
*hdev
;
1765 hdev
= hci_dev_get(dev
);
1769 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1774 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1779 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
1786 static void hci_update_scan_state(struct hci_dev
*hdev
, u8 scan
)
1788 bool conn_changed
, discov_changed
;
1790 BT_DBG("%s scan 0x%02x", hdev
->name
, scan
);
1792 if ((scan
& SCAN_PAGE
))
1793 conn_changed
= !hci_dev_test_and_set_flag(hdev
,
1796 conn_changed
= hci_dev_test_and_clear_flag(hdev
,
1799 if ((scan
& SCAN_INQUIRY
)) {
1800 discov_changed
= !hci_dev_test_and_set_flag(hdev
,
1803 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1804 discov_changed
= hci_dev_test_and_clear_flag(hdev
,
1808 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
1811 if (conn_changed
|| discov_changed
) {
1812 /* In case this was disabled through mgmt */
1813 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
1815 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1816 mgmt_update_adv_data(hdev
);
1818 mgmt_new_settings(hdev
);
1822 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
1824 struct hci_dev
*hdev
;
1825 struct hci_dev_req dr
;
1828 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
1831 hdev
= hci_dev_get(dr
.dev_id
);
1835 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
)) {
1840 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
1845 if (hdev
->dev_type
!= HCI_BREDR
) {
1850 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1857 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1862 if (!lmp_encrypt_capable(hdev
)) {
1867 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
1868 /* Auth must be enabled first */
1869 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
1875 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
1880 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
1883 /* Ensure that the connectable and discoverable states
1884 * get correctly modified as this was a non-mgmt change.
1887 hci_update_scan_state(hdev
, dr
.dev_opt
);
1891 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
1895 case HCISETLINKMODE
:
1896 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
1897 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
1901 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
1905 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1906 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1910 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
1911 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
1924 int hci_get_dev_list(void __user
*arg
)
1926 struct hci_dev
*hdev
;
1927 struct hci_dev_list_req
*dl
;
1928 struct hci_dev_req
*dr
;
1929 int n
= 0, size
, err
;
1932 if (get_user(dev_num
, (__u16 __user
*) arg
))
1935 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
1938 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
1940 dl
= kzalloc(size
, GFP_KERNEL
);
1946 read_lock(&hci_dev_list_lock
);
1947 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1948 unsigned long flags
= hdev
->flags
;
1950 /* When the auto-off is configured it means the transport
1951 * is running, but in that case still indicate that the
1952 * device is actually down.
1954 if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
))
1955 flags
&= ~BIT(HCI_UP
);
1957 (dr
+ n
)->dev_id
= hdev
->id
;
1958 (dr
+ n
)->dev_opt
= flags
;
1963 read_unlock(&hci_dev_list_lock
);
1966 size
= sizeof(*dl
) + n
* sizeof(*dr
);
1968 err
= copy_to_user(arg
, dl
, size
);
1971 return err
? -EFAULT
: 0;
1974 int hci_get_dev_info(void __user
*arg
)
1976 struct hci_dev
*hdev
;
1977 struct hci_dev_info di
;
1978 unsigned long flags
;
1981 if (copy_from_user(&di
, arg
, sizeof(di
)))
1984 hdev
= hci_dev_get(di
.dev_id
);
1988 /* When the auto-off is configured it means the transport
1989 * is running, but in that case still indicate that the
1990 * device is actually down.
1992 if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
))
1993 flags
= hdev
->flags
& ~BIT(HCI_UP
);
1995 flags
= hdev
->flags
;
1997 strcpy(di
.name
, hdev
->name
);
1998 di
.bdaddr
= hdev
->bdaddr
;
1999 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
2001 di
.pkt_type
= hdev
->pkt_type
;
2002 if (lmp_bredr_capable(hdev
)) {
2003 di
.acl_mtu
= hdev
->acl_mtu
;
2004 di
.acl_pkts
= hdev
->acl_pkts
;
2005 di
.sco_mtu
= hdev
->sco_mtu
;
2006 di
.sco_pkts
= hdev
->sco_pkts
;
2008 di
.acl_mtu
= hdev
->le_mtu
;
2009 di
.acl_pkts
= hdev
->le_pkts
;
2013 di
.link_policy
= hdev
->link_policy
;
2014 di
.link_mode
= hdev
->link_mode
;
2016 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
2017 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
2019 if (copy_to_user(arg
, &di
, sizeof(di
)))
2027 /* ---- Interface to HCI drivers ---- */
2029 static int hci_rfkill_set_block(void *data
, bool blocked
)
2031 struct hci_dev
*hdev
= data
;
2033 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
2035 if (hci_dev_test_flag(hdev
, HCI_USER_CHANNEL
))
2039 hci_dev_set_flag(hdev
, HCI_RFKILLED
);
2040 if (!hci_dev_test_flag(hdev
, HCI_SETUP
) &&
2041 !hci_dev_test_flag(hdev
, HCI_CONFIG
))
2042 hci_dev_do_close(hdev
);
2044 hci_dev_clear_flag(hdev
, HCI_RFKILLED
);
2050 static const struct rfkill_ops hci_rfkill_ops
= {
2051 .set_block
= hci_rfkill_set_block
,
2054 static void hci_power_on(struct work_struct
*work
)
2056 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
2059 BT_DBG("%s", hdev
->name
);
2061 err
= hci_dev_do_open(hdev
);
2064 mgmt_set_powered_failed(hdev
, err
);
2065 hci_dev_unlock(hdev
);
2069 /* During the HCI setup phase, a few error conditions are
2070 * ignored and they need to be checked now. If they are still
2071 * valid, it is important to turn the device back off.
2073 if (hci_dev_test_flag(hdev
, HCI_RFKILLED
) ||
2074 hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) ||
2075 (hdev
->dev_type
== HCI_BREDR
&&
2076 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2077 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2078 hci_dev_clear_flag(hdev
, HCI_AUTO_OFF
);
2079 hci_dev_do_close(hdev
);
2080 } else if (hci_dev_test_flag(hdev
, HCI_AUTO_OFF
)) {
2081 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
2082 HCI_AUTO_OFF_TIMEOUT
);
2085 if (hci_dev_test_and_clear_flag(hdev
, HCI_SETUP
)) {
2086 /* For unconfigured devices, set the HCI_RAW flag
2087 * so that userspace can easily identify them.
2089 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
2090 set_bit(HCI_RAW
, &hdev
->flags
);
2092 /* For fully configured devices, this will send
2093 * the Index Added event. For unconfigured devices,
2094 * it will send Unconfigued Index Added event.
2096 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2097 * and no event will be send.
2099 mgmt_index_added(hdev
);
2100 } else if (hci_dev_test_and_clear_flag(hdev
, HCI_CONFIG
)) {
2101 /* When the controller is now configured, then it
2102 * is important to clear the HCI_RAW flag.
2104 if (!hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
2105 clear_bit(HCI_RAW
, &hdev
->flags
);
2107 /* Powering on the controller with HCI_CONFIG set only
2108 * happens with the transition from unconfigured to
2109 * configured. This will send the Index Added event.
2111 mgmt_index_added(hdev
);
2115 static void hci_power_off(struct work_struct
*work
)
2117 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2120 BT_DBG("%s", hdev
->name
);
2122 hci_dev_do_close(hdev
);
2125 static void hci_error_reset(struct work_struct
*work
)
2127 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, error_reset
);
2129 BT_DBG("%s", hdev
->name
);
2132 hdev
->hw_error(hdev
, hdev
->hw_error_code
);
2134 BT_ERR("%s hardware error 0x%2.2x", hdev
->name
,
2135 hdev
->hw_error_code
);
2137 if (hci_dev_do_close(hdev
))
2140 hci_dev_do_open(hdev
);
2143 static void hci_discov_off(struct work_struct
*work
)
2145 struct hci_dev
*hdev
;
2147 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
2149 BT_DBG("%s", hdev
->name
);
2151 mgmt_discoverable_timeout(hdev
);
2154 void hci_uuids_clear(struct hci_dev
*hdev
)
2156 struct bt_uuid
*uuid
, *tmp
;
2158 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2159 list_del(&uuid
->list
);
2164 void hci_link_keys_clear(struct hci_dev
*hdev
)
2166 struct link_key
*key
;
2168 list_for_each_entry_rcu(key
, &hdev
->link_keys
, list
) {
2169 list_del_rcu(&key
->list
);
2170 kfree_rcu(key
, rcu
);
2174 void hci_smp_ltks_clear(struct hci_dev
*hdev
)
2178 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2179 list_del_rcu(&k
->list
);
2184 void hci_smp_irks_clear(struct hci_dev
*hdev
)
2188 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2189 list_del_rcu(&k
->list
);
2194 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2199 list_for_each_entry_rcu(k
, &hdev
->link_keys
, list
) {
2200 if (bacmp(bdaddr
, &k
->bdaddr
) == 0) {
2210 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2211 u8 key_type
, u8 old_key_type
)
2214 if (key_type
< 0x03)
2217 /* Debug keys are insecure so don't store them persistently */
2218 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
2221 /* Changed combination key and there's no previous one */
2222 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
2225 /* Security mode 3 case */
2229 /* BR/EDR key derived using SC from an LE link */
2230 if (conn
->type
== LE_LINK
)
2233 /* Neither local nor remote side had no-bonding as requirement */
2234 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
2237 /* Local side had dedicated bonding as requirement */
2238 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
2241 /* Remote side had dedicated bonding as requirement */
2242 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
2245 /* If none of the above criteria match, then don't store the key
2250 static u8
ltk_role(u8 type
)
2252 if (type
== SMP_LTK
)
2253 return HCI_ROLE_MASTER
;
2255 return HCI_ROLE_SLAVE
;
2258 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2259 u8 addr_type
, u8 role
)
2264 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2265 if (addr_type
!= k
->bdaddr_type
|| bacmp(bdaddr
, &k
->bdaddr
))
2268 if (smp_ltk_is_sc(k
) || ltk_role(k
->type
) == role
) {
2278 struct smp_irk
*hci_find_irk_by_rpa(struct hci_dev
*hdev
, bdaddr_t
*rpa
)
2280 struct smp_irk
*irk
;
2283 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2284 if (!bacmp(&irk
->rpa
, rpa
)) {
2290 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2291 if (smp_irk_matches(hdev
, irk
->val
, rpa
)) {
2292 bacpy(&irk
->rpa
, rpa
);
2302 struct smp_irk
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2305 struct smp_irk
*irk
;
2307 /* Identity Address must be public or static random */
2308 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
2312 list_for_each_entry_rcu(irk
, &hdev
->identity_resolving_keys
, list
) {
2313 if (addr_type
== irk
->addr_type
&&
2314 bacmp(bdaddr
, &irk
->bdaddr
) == 0) {
2324 struct link_key
*hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2325 bdaddr_t
*bdaddr
, u8
*val
, u8 type
,
2326 u8 pin_len
, bool *persistent
)
2328 struct link_key
*key
, *old_key
;
2331 old_key
= hci_find_link_key(hdev
, bdaddr
);
2333 old_key_type
= old_key
->type
;
2336 old_key_type
= conn
? conn
->key_type
: 0xff;
2337 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2340 list_add_rcu(&key
->list
, &hdev
->link_keys
);
2343 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
2345 /* Some buggy controller combinations generate a changed
2346 * combination key for legacy pairing even when there's no
2348 if (type
== HCI_LK_CHANGED_COMBINATION
&&
2349 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
2350 type
= HCI_LK_COMBINATION
;
2352 conn
->key_type
= type
;
2355 bacpy(&key
->bdaddr
, bdaddr
);
2356 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
2357 key
->pin_len
= pin_len
;
2359 if (type
== HCI_LK_CHANGED_COMBINATION
)
2360 key
->type
= old_key_type
;
2365 *persistent
= hci_persistent_key(hdev
, conn
, type
,
2371 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2372 u8 addr_type
, u8 type
, u8 authenticated
,
2373 u8 tk
[16], u8 enc_size
, __le16 ediv
, __le64 rand
)
2375 struct smp_ltk
*key
, *old_key
;
2376 u8 role
= ltk_role(type
);
2378 old_key
= hci_find_ltk(hdev
, bdaddr
, addr_type
, role
);
2382 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2385 list_add_rcu(&key
->list
, &hdev
->long_term_keys
);
2388 bacpy(&key
->bdaddr
, bdaddr
);
2389 key
->bdaddr_type
= addr_type
;
2390 memcpy(key
->val
, tk
, sizeof(key
->val
));
2391 key
->authenticated
= authenticated
;
2394 key
->enc_size
= enc_size
;
2400 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2401 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
2403 struct smp_irk
*irk
;
2405 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
2407 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
2411 bacpy(&irk
->bdaddr
, bdaddr
);
2412 irk
->addr_type
= addr_type
;
2414 list_add_rcu(&irk
->list
, &hdev
->identity_resolving_keys
);
2417 memcpy(irk
->val
, val
, 16);
2418 bacpy(&irk
->rpa
, rpa
);
2423 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2425 struct link_key
*key
;
2427 key
= hci_find_link_key(hdev
, bdaddr
);
2431 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2433 list_del_rcu(&key
->list
);
2434 kfree_rcu(key
, rcu
);
2439 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2444 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2445 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
2448 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2450 list_del_rcu(&k
->list
);
2455 return removed
? 0 : -ENOENT
;
2458 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
2462 list_for_each_entry_rcu(k
, &hdev
->identity_resolving_keys
, list
) {
2463 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
2466 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2468 list_del_rcu(&k
->list
);
2473 bool hci_bdaddr_is_paired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
2476 struct smp_irk
*irk
;
2479 if (type
== BDADDR_BREDR
) {
2480 if (hci_find_link_key(hdev
, bdaddr
))
2485 /* Convert to HCI addr type which struct smp_ltk uses */
2486 if (type
== BDADDR_LE_PUBLIC
)
2487 addr_type
= ADDR_LE_DEV_PUBLIC
;
2489 addr_type
= ADDR_LE_DEV_RANDOM
;
2491 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
2493 bdaddr
= &irk
->bdaddr
;
2494 addr_type
= irk
->addr_type
;
2498 list_for_each_entry_rcu(k
, &hdev
->long_term_keys
, list
) {
2499 if (k
->bdaddr_type
== addr_type
&& !bacmp(bdaddr
, &k
->bdaddr
)) {
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
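
/* Remote out-of-band pairing data, as received from user space via mgmt, is
 * kept on hdev->remote_oob_data and looked up by address and address type.
 */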
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}
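
/* data->present encodes which hash/randomizer pairs are valid:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = none.
 */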
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
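
/* Generic bdaddr_list helpers. These back the BR/EDR whitelist/blacklist as
 * well as the LE white list. For illustration only (hypothetical caller,
 * hdev->lock held):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		; // entry was already present
 */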
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
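
/* Interleaved (BR/EDR + LE) discovery: once the LE scan phase has been
 * disabled, le_scan_disable_work_complete() either stops discovery or, when
 * the controller cannot scan and inquire simultaneously, queues a classic
 * inquiry whose completion is handled by inquiry_complete() below.
 */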
static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
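
/* Allocation only fills in protocol defaults (timings, packet types, lists
 * and work items); no hardware is touched until the driver registers the
 * device and it gets powered on.
 */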
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	adv_info_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
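
/* A transport driver is expected to allocate the device, wire up its
 * open/close/send callbacks and then register it. A minimal sketch (the
 * my_drv_* names are illustrative only):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_drv_open;
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */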
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * mgmt_pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
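
/* Called when flow control credits have not come back for too long: every
 * connection of the given type that still has packets outstanding is assumed
 * stalled and gets disconnected.
 */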
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
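
/* ACL scheduling comes in two flavours, selected by hdev->flow_ctl_mode:
 * packet-based accounting (hdev->acl_cnt, the common case) and block-based
 * accounting (hdev->block_cnt, used by AMP controllers).
 */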
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
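
/* hdev->cmd_cnt implements the one-outstanding-command flow control towards
 * the controller: it is consumed here when a command is handed to the driver
 * and replenished by the command complete/status event handlers, or by
 * hci_cmd_timeout() if the controller never answers.
 */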
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}