/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
*hdev
, int num
, __u8
*buf
)
906 struct discovery_state
*cache
= &hdev
->discovery
;
907 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
908 struct inquiry_entry
*e
;
911 list_for_each_entry(e
, &cache
->all
, all
) {
912 struct inquiry_data
*data
= &e
->data
;
917 bacpy(&info
->bdaddr
, &data
->bdaddr
);
918 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
919 info
->pscan_period_mode
= data
->pscan_period_mode
;
920 info
->pscan_mode
= data
->pscan_mode
;
921 memcpy(info
->dev_class
, data
->dev_class
, 3);
922 info
->clock_offset
= data
->clock_offset
;
928 BT_DBG("cache %p, copied %d", cache
, copied
);
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}
*hdev
)
1629 struct list_head
*p
, *n
;
1631 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
1632 struct link_key
*key
;
1634 key
= list_entry(p
, struct link_key
, list
);
1643 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
1645 struct smp_ltk
*k
, *tmp
;
1647 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1655 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1659 list_for_each_entry(k
, &hdev
->link_keys
, list
)
1660 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
1666 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
1667 u8 key_type
, u8 old_key_type
)
1670 if (key_type
< 0x03)
1673 /* Debug keys are insecure so don't store them persistently */
1674 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
1677 /* Changed combination key and there's no previous one */
1678 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
1681 /* Security mode 3 case */
1685 /* Neither local nor remote side had no-bonding as requirement */
1686 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
1689 /* Local side had dedicated bonding as requirement */
1690 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
1693 /* Remote side had dedicated bonding as requirement */
1694 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
1697 /* If none of the above criteria match, then don't store the key
1702 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
1706 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
1707 if (k
->ediv
!= ediv
||
1708 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
1717 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
1722 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
1723 if (addr_type
== k
->bdaddr_type
&&
1724 bacmp(bdaddr
, &k
->bdaddr
) == 0)
1730 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
1731 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
1733 struct link_key
*key
, *old_key
;
1737 old_key
= hci_find_link_key(hdev
, bdaddr
);
1739 old_key_type
= old_key
->type
;
1742 old_key_type
= conn
? conn
->key_type
: 0xff;
1743 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1746 list_add(&key
->list
, &hdev
->link_keys
);
1749 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
1751 /* Some buggy controller combinations generate a changed
1752 * combination key for legacy pairing even when there's no
1754 if (type
== HCI_LK_CHANGED_COMBINATION
&&
1755 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
1756 type
= HCI_LK_COMBINATION
;
1758 conn
->key_type
= type
;
1761 bacpy(&key
->bdaddr
, bdaddr
);
1762 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
1763 key
->pin_len
= pin_len
;
1765 if (type
== HCI_LK_CHANGED_COMBINATION
)
1766 key
->type
= old_key_type
;
1773 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
1775 mgmt_new_link_key(hdev
, key
, persistent
);
1778 conn
->flush_key
= !persistent
;
1783 int hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
, u8 type
,
1784 int new_key
, u8 authenticated
, u8 tk
[16], u8 enc_size
, __le16
1787 struct smp_ltk
*key
, *old_key
;
1789 if (!(type
& HCI_SMP_STK
) && !(type
& HCI_SMP_LTK
))
1792 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
);
1796 key
= kzalloc(sizeof(*key
), GFP_ATOMIC
);
1799 list_add(&key
->list
, &hdev
->long_term_keys
);
1802 bacpy(&key
->bdaddr
, bdaddr
);
1803 key
->bdaddr_type
= addr_type
;
1804 memcpy(key
->val
, tk
, sizeof(key
->val
));
1805 key
->authenticated
= authenticated
;
1807 key
->enc_size
= enc_size
;
1809 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
1814 if (type
& HCI_SMP_LTK
)
1815 mgmt_new_ltk(hdev
, key
, 1);
1820 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1822 struct link_key
*key
;
1824 key
= hci_find_link_key(hdev
, bdaddr
);
1828 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
1830 list_del(&key
->list
);
1836 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
1838 struct smp_ltk
*k
, *tmp
;
1840 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
1841 if (bacmp(bdaddr
, &k
->bdaddr
))
1844 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
*hdev
)
1901 struct oob_data
*data
, *n
;
1903 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
1904 list_del(&data
->list
);
1911 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8
*hash
,
1914 struct oob_data
*data
;
1916 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
1919 data
= kmalloc(sizeof(*data
), GFP_ATOMIC
);
1923 bacpy(&data
->bdaddr
, bdaddr
);
1924 list_add(&data
->list
, &hdev
->remote_oob_data
);
1927 memcpy(data
->hash
, hash
, sizeof(data
->hash
));
1928 memcpy(data
->randomizer
, randomizer
, sizeof(data
->randomizer
));
1930 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
	}
}
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2608 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
,
2611 struct sk_buff
*skb
;
2613 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2615 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
2617 BT_ERR("%s no memory for command", hdev
->name
);
2621 /* Stand-alone HCI commands must be flaged as
2622 * single-command requests.
2624 bt_cb(skb
)->req
.start
= true;
2626 skb_queue_tail(&hdev
->cmd_q
, skb
);
2627 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
2632 /* Queue a command to an asynchronous HCI request */
2633 void hci_req_add_ev(struct hci_request
*req
, u16 opcode
, u32 plen
,
2634 const void *param
, u8 event
)
2636 struct hci_dev
*hdev
= req
->hdev
;
2637 struct sk_buff
*skb
;
2639 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
2641 /* If an error occured during request building, there is no point in
2642 * queueing the HCI command. We can simply return.
2647 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
2649 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2650 hdev
->name
, opcode
);
2655 if (skb_queue_empty(&req
->cmd_q
))
2656 bt_cb(skb
)->req
.start
= true;
2658 bt_cb(skb
)->req
.event
= event
;
2660 skb_queue_tail(&req
->cmd_q
, skb
);
2663 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
,
2666 hci_req_add_ev(req
, opcode
, plen
, param
, 0);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
*skb
, __u16 handle
, __u16 flags
)
2690 struct hci_acl_hdr
*hdr
;
2693 skb_push(skb
, HCI_ACL_HDR_SIZE
);
2694 skb_reset_transport_header(skb
);
2695 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
2696 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
2697 hdr
->dlen
= cpu_to_le16(len
);
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
*chan
, struct sk_buff
*skb
, __u16 flags
)
2761 struct hci_dev
*hdev
= chan
->conn
->hdev
;
2763 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
2765 skb
->dev
= (void *) hdev
;
2767 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
2769 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
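/* Worked example (not in the original file): with 8 ACL credits
 * (hdev->acl_cnt == 8) and num == 2 eligible connections, each call
 * grants quote = 8 / 2 = 4 packets to the connection with the lowest
 * c->sent, approximating fair round-robin; a zero quotient is rounded
 * up to 1 so a connection is never starved while credits remain.
 */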
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
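/* Illustrative note (not in the original file): channel selection is
 * priority-first, fairness-second. A queued skb with a higher
 * priority than anything seen so far resets the candidate set
 * (num = 0, min = ~0), so lower-priority channels only receive quota
 * once no higher-priority channel has pending data.
 */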
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
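/* Illustrative note (not in the original file): starvation avoidance.
 * Channels that transmitted in the last round (chan->sent != 0) only
 * get their counter cleared; channels that sent nothing have the
 * priority of their head skb promoted to HCI_PRIO_MAX - 1 so the next
 * hci_chan_sent() pass will prefer them.
 */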
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
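/* Worked example (not in the original file): with hdev->block_len of
 * 255 bytes and a 339-byte skb (335 bytes of payload after the 4-byte
 * ACL header), DIV_ROUND_UP(335, 255) = 2 controller buffer blocks
 * are consumed by the packet.
 */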
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL links on a BR/EDR controller: nothing to schedule */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP links on an AMP controller: nothing to schedule */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
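/* Illustrative note (not in the original file): packet-based flow
 * control (the BR/EDR default) counts whole ACL packets against
 * hdev->acl_cnt, while block-based flow control (used by AMP
 * controllers) counts fixed-size buffer blocks against
 * hdev->block_cnt via __get_blocks() above.
 */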
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
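/* Illustrative note (not in the original file): controllers that
 * report no dedicated LE buffers (hdev->le_pkts == 0) share the ACL
 * credit pool, which is why the remaining credits are written back to
 * either hdev->le_cnt or hdev->acl_cnt above.
 */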
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
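/* Illustrative note (not in the original file): when a request is cut
 * short by a failing command, the drain loop above discards the rest
 * of that request's commands, stopping at the next req.start marker;
 * since the complete callback travels on the request's last command,
 * it is picked up from the final discarded entry.
 */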
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
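/* Illustrative note (not in the original file): hdev->cmd_cnt is the
 * controller's command-credit counter; it is decremented here and
 * replenished when a Command Complete/Status event arrives. The
 * cmd_timer armed above acts as a watchdog in case the controller
 * never answers the sent command.
 */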
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}