/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
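/*
 * Illustrative sketch (not part of this file): a request callback simply
 * sends the HCI commands for one logical operation; __hci_request() then
 * sleeps on hdev->req_wait_q until hci_req_complete() reports the result
 * or the timeout expires. The example callback and its opt encoding below
 * are hypothetical.
 */
#if 0
static void example_voice_setting_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 setting = cpu_to_le16(opt);

	hci_send_cmd(hdev, HCI_OP_WRITE_VOICE_SETTING, 2, &setting);
}

/* Caller side: serialize and wait up to HCI_INIT_TIMEOUT for completion. */
static int example_set_voice_setting(struct hci_dev *hdev, u16 setting)
{
	return hci_request(hdev, example_voice_setting_req, setting,
			   HCI_INIT_TIMEOUT);
}
#endif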
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
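/*
 * Illustrative userspace sketch (not part of this file): the HCIINQUIRY
 * ioctl that lands in hci_inquiry() above. Error handling is omitted and
 * the buffer sizing mirrors the kernel side (num_rsp == 0 is capped at
 * 255 responses).
 */
#if 0
	struct hci_inquiry_req *ir;
	char buf[sizeof(*ir) + 255 * sizeof(struct inquiry_info)];
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	ir = (struct hci_inquiry_req *) buf;
	ir->dev_id  = 0;		/* hci0 */
	ir->flags   = IREQ_CACHE_FLUSH;
	ir->lap[0]  = 0x33;		/* GIAC */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;
	ir->length  = 8;		/* 8 * 1.28 s inquiry */
	ir->num_rsp = 0;		/* unlimited (capped at 255) */

	ioctl(dd, HCIINQUIRY, (unsigned long) buf);
#endif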
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
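/*
 * Worked example (comment only): a legacy PIN pairing that produces an
 * HCI_LK_COMBINATION key (type 0x00) falls below 0x03, so it is stored
 * persistently regardless of the authentication requirements, while a
 * key of type HCI_LK_DEBUG_COMBINATION is always dropped on disconnect.
 */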
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
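/*
 * Illustrative sketch (not part of this file): kicking off a one-shot LE
 * scan through the asynchronous hci_le_scan() entry point above. The
 * interval/window values (in 0.625 ms units) and the timeout are example
 * numbers only.
 */
#if 0
	/* ~10 s active scan, 10 ms window every 60 ms */
	err = hci_le_scan(hdev, 0x01 /* active */, 0x0060, 0x0010, 10000);
	if (err == -EINPROGRESS)
		; /* a scan work item is already queued */
#endif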
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
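/*
 * Illustrative sketch (not part of this file): the minimal allocate and
 * register sequence a transport driver performs against this API. The
 * example_open/close/send callbacks are hypothetical driver functions.
 */
#if 0
static int example_driver_probe(void)
{
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;	/* required by hci_register_dev() */
	hdev->close = example_close;	/* required by hci_register_dev() */
	hdev->send  = example_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}
#endif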
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
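/*
 * Illustrative sketch (not part of this file): a transport driver would
 * typically feed raw bytes received from the hardware into the core via
 * hci_recv_fragment(), one packet type at a time. The driver function,
 * buffer and length below are hypothetical.
 */
#if 0
static void example_driver_rx(struct hci_dev *hdev, u8 pkt_type,
			      void *buf, int len)
{
	/* The core reassembles partial frames per packet type and calls
	 * hci_recv_frame() once a complete frame has been collected. */
	if (hci_recv_fragment(hdev, pkt_type, buf, len) < 0)
		BT_ERR("%s reassembly failed", hdev->name);
}
#endif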
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
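/*
 * Illustrative sketch (not part of this file): queueing a command with a
 * one-byte parameter from elsewhere in the stack. The write-scan-enable
 * value chosen here is only an example.
 */
#if 0
static void example_enable_page_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE;

	/* The command is queued on hdev->cmd_q and sent by hci_cmd_work()
	 * as soon as the controller has command credits available. */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
#endif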
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
;