/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
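
/*
 * Illustrative sketch, not part of the original file: a consumer of the
 * notifier chain above. my_hci_event() and my_notifier are hypothetical
 * names; the pattern is standard notifier_block usage, assuming the events
 * delivered are the HCI_DEV_* constants used throughout this file.
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			BT_DBG("%s came up", hdev->name);
 *
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_hci_event,
 *	};
 *
 *	hci_register_notifier(&my_notifier);
 */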
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
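
/*
 * Usage sketch (hypothetical caller): a request pairs a command-queueing
 * callback with a synchronous wait; hci_req_complete() wakes the waiter
 * when the controller answers. The HCISETSCAN ioctl below effectively does:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */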
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
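
/*
 * Illustrative user-space sketch (assumes the HCIINQUIRY ioctl and the GIAC
 * LAP 0x9e8b33; the device id and inquiry length are hypothetical):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			.lap = { 0x33, 0x8b, 0x9e },
 *			.flags = IREQ_CACHE_FLUSH } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */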
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}
int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
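
/*
 * Worked example for the rules above, using the HCI authentication
 * requirement encoding (0x00/0x01 no bonding, 0x02/0x03 dedicated bonding,
 * 0x04/0x05 general bonding): a key negotiated with auth_type == 0x04 and
 * remote_auth == 0x04 is stored, since neither side asked for no-bonding,
 * while an unauthenticated key from auth_type == 0x01 and
 * remote_auth == 0x00 is not, because none of the legacy, debug,
 * dedicated-bonding or general-bonding tests match.
 */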
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
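
/*
 * Illustrative driver-side sketch (hypothetical transport driver, not part
 * of this file): the minimal allocate/register sequence, providing the
 * open/close/destruct hooks that hci_register_dev() checks for. The my_*
 * callbacks are placeholders.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */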
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->flags);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;

					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;

					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;

					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
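
/*
 * Illustrative sketch (hypothetical driver RX path): a transport that
 * receives data in arbitrary chunks but learns the packet type out of band
 * can feed each chunk here and let the core deliver complete frames:
 *
 *	static void my_rx_chunk(struct hci_dev *hdev, void *data, int len)
 *	{
 *		if (hci_recv_fragment(hdev, HCI_EVENT_PKT, data, len) < 0)
 *			BT_ERR("%s reassembly failed", hdev->name);
 *	}
 */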
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1703 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
1705 struct hci_command_hdr
*hdr
;
1707 if (!hdev
->sent_cmd
)
1710 hdr
= (void *) hdev
->sent_cmd
->data
;
1712 if (hdr
->opcode
!= cpu_to_le16(opcode
))
1715 BT_DBG("%s opcode 0x%x", hdev
->name
, opcode
);
1717 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
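
/*
 * Worked example of the quota computation above: with three ACL connections
 * holding queued data (num == 3) and hdev->acl_cnt == 8 free controller
 * buffers, the least-busy connection is picked and given a quote of
 * 8 / 3 == 2 packets; when cnt / num rounds down to zero the connection
 * still gets a quote of 1, so no ready link starves.
 */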
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
			hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}