/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
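/* Usage sketch (illustrative only): the dut_mode entry is created per
 * controller in __hci_init() below.  Assuming debugfs is mounted at
 * /sys/kernel/debug, Device Under Test mode for hci0 can then be toggled
 * and inspected from user space with, for example:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */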
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
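/* DEFINE_SIMPLE_ATTRIBUTE() wires the get/set pair and the printf format
 * into a read-only (or read-write) debugfs file.  A usage sketch, assuming
 * debugfs is mounted at /sys/kernel/debug (example output only):
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/voice_setting
 *	0x0060
 */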
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
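/* The sniff interval values handled above are expressed in baseband slots
 * of 0.625 ms and must be even.  A worked example (illustrative only):
 *
 *	0x0050 =  80 slots ->  80 * 0.625 ms =  50 ms
 *	0x0320 = 800 slots -> 800 * 0.625 ms = 500 ms
 */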
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
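/* The LE connection interval limits checked above are in units of 1.25 ms,
 * so the accepted range works out as follows (illustrative only):
 *
 *	0x0006 * 1.25 ms =    7.5 ms (shortest allowed interval)
 *	0x0c80 * 1.25 ms = 4000.0 ms (longest allowed interval)
 */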
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
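/* A minimal usage sketch (illustrative only, not lifted from a specific
 * caller): issue a blocking command and inspect the Command Complete
 * return parameters.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */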
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
				    hdev, &use_debug_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1526 /* ---- Inquiry support ---- */
1528 bool hci_discovery_active(struct hci_dev
*hdev
)
1530 struct discovery_state
*discov
= &hdev
->discovery
;
1532 switch (discov
->state
) {
1533 case DISCOVERY_FINDING
:
1534 case DISCOVERY_RESOLVING
:
1542 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
1544 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
1546 if (hdev
->discovery
.state
== state
)
1550 case DISCOVERY_STOPPED
:
1551 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
1552 mgmt_discovering(hdev
, 0);
1554 case DISCOVERY_STARTING
:
1556 case DISCOVERY_FINDING
:
1557 mgmt_discovering(hdev
, 1);
1559 case DISCOVERY_RESOLVING
:
1561 case DISCOVERY_STOPPING
:
1565 hdev
->discovery
.state
= state
;
1568 void hci_inquiry_cache_flush(struct hci_dev
*hdev
)
1570 struct discovery_state
*cache
= &hdev
->discovery
;
1571 struct inquiry_entry
*p
, *n
;
1573 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
1578 INIT_LIST_HEAD(&cache
->unknown
);
1579 INIT_LIST_HEAD(&cache
->resolve
);
1582 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
1585 struct discovery_state
*cache
= &hdev
->discovery
;
1586 struct inquiry_entry
*e
;
1588 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1590 list_for_each_entry(e
, &cache
->all
, all
) {
1591 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1598 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
1601 struct discovery_state
*cache
= &hdev
->discovery
;
1602 struct inquiry_entry
*e
;
1604 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1606 list_for_each_entry(e
, &cache
->unknown
, list
) {
1607 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1614 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
1618 struct discovery_state
*cache
= &hdev
->discovery
;
1619 struct inquiry_entry
*e
;
1621 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
1623 list_for_each_entry(e
, &cache
->resolve
, list
) {
1624 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
1626 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1633 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
1634 struct inquiry_entry
*ie
)
1636 struct discovery_state
*cache
= &hdev
->discovery
;
1637 struct list_head
*pos
= &cache
->resolve
;
1638 struct inquiry_entry
*p
;
1640 list_del(&ie
->list
);
1642 list_for_each_entry(p
, &cache
->resolve
, list
) {
1643 if (p
->name_state
!= NAME_PENDING
&&
1644 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
1649 list_add(&ie
->list
, pos
);
1652 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
1653 bool name_known
, bool *ssp
)
1655 struct discovery_state
*cache
= &hdev
->discovery
;
1656 struct inquiry_entry
*ie
;
1658 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
1660 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
);
1663 *ssp
= data
->ssp_mode
;
1665 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
1667 if (ie
->data
.ssp_mode
&& ssp
)
1670 if (ie
->name_state
== NAME_NEEDED
&&
1671 data
->rssi
!= ie
->data
.rssi
) {
1672 ie
->data
.rssi
= data
->rssi
;
1673 hci_inquiry_cache_update_resolve(hdev
, ie
);
1679 /* Entry not in the cache. Add new one. */
1680 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
1684 list_add(&ie
->all
, &cache
->all
);
1687 ie
->name_state
= NAME_KNOWN
;
1689 ie
->name_state
= NAME_NOT_KNOWN
;
1690 list_add(&ie
->list
, &cache
->unknown
);
1694 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
1695 ie
->name_state
!= NAME_PENDING
) {
1696 ie
->name_state
= NAME_KNOWN
;
1697 list_del(&ie
->list
);
1700 memcpy(&ie
->data
, data
, sizeof(*data
));
1701 ie
->timestamp
= jiffies
;
1702 cache
->timestamp
= jiffies
;
1704 if (ie
->name_state
== NAME_NOT_KNOWN
)
1710 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
1712 struct discovery_state
*cache
= &hdev
->discovery
;
1713 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
1714 struct inquiry_entry
*e
;
1717 list_for_each_entry(e
, &cache
->all
, all
) {
1718 struct inquiry_data
*data
= &e
->data
;
1723 bacpy(&info
->bdaddr
, &data
->bdaddr
);
1724 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
1725 info
->pscan_period_mode
= data
->pscan_period_mode
;
1726 info
->pscan_mode
= data
->pscan_mode
;
1727 memcpy(info
->dev_class
, data
->dev_class
, 3);
1728 info
->clock_offset
= data
->clock_offset
;
1734 BT_DBG("cache %p, copied %d", cache
, copied
);
1738 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
1740 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
1741 struct hci_dev
*hdev
= req
->hdev
;
1742 struct hci_cp_inquiry cp
;
1744 BT_DBG("%s", hdev
->name
);
1746 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
1750 memcpy(&cp
.lap
, &ir
->lap
, 3);
1751 cp
.length
= ir
->length
;
1752 cp
.num_rsp
= ir
->num_rsp
;
1753 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
1756 static int wait_inquiry(void *word
)
1759 return signal_pending(current
);
1762 int hci_inquiry(void __user
*arg
)
1764 __u8 __user
*ptr
= arg
;
1765 struct hci_inquiry_req ir
;
1766 struct hci_dev
*hdev
;
1767 int err
= 0, do_inquiry
= 0, max_rsp
;
1771 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
1774 hdev
= hci_dev_get(ir
.dev_id
);
1778 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
1783 if (hdev
->dev_type
!= HCI_BREDR
) {
1788 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1794 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
1795 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
1796 hci_inquiry_cache_flush(hdev
);
1799 hci_dev_unlock(hdev
);
1801 timeo
= ir
.length
* msecs_to_jiffies(2000);
1804 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
1809 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1810 * cleared). If it is interrupted by a signal, return -EINTR.
1812 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
, wait_inquiry
,
1813 TASK_INTERRUPTIBLE
))
1817 /* for unlimited number of responses we will use buffer with
1820 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
1822 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1823 * copy it to the user space.
1825 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
1832 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
1833 hci_dev_unlock(hdev
);
1835 BT_DBG("num_rsp %d", ir
.num_rsp
);
1837 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
1839 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
1852 static int hci_dev_do_open(struct hci_dev
*hdev
)
1856 BT_DBG("%s %p", hdev
->name
, hdev
);
1860 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
1865 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1866 /* Check for rfkill but allow the HCI setup stage to
1867 * proceed (which in itself doesn't cause any RF activity).
1869 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
)) {
1874 /* Check for valid public address or a configured static
1875 * random adddress, but let the HCI setup proceed to
1876 * be able to determine if there is a public address
1879 * This check is only valid for BR/EDR controllers
1880 * since AMP controllers do not have an address.
1882 if (hdev
->dev_type
== HCI_BREDR
&&
1883 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
1884 !bacmp(&hdev
->static_addr
, BDADDR_ANY
)) {
1885 ret
= -EADDRNOTAVAIL
;
1890 if (test_bit(HCI_UP
, &hdev
->flags
)) {
1895 if (hdev
->open(hdev
)) {
1900 atomic_set(&hdev
->cmd_cnt
, 1);
1901 set_bit(HCI_INIT
, &hdev
->flags
);
1903 if (hdev
->setup
&& test_bit(HCI_SETUP
, &hdev
->dev_flags
))
1904 ret
= hdev
->setup(hdev
);
1907 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
1908 set_bit(HCI_RAW
, &hdev
->flags
);
1910 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
1911 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
1912 ret
= __hci_init(hdev
);
1915 clear_bit(HCI_INIT
, &hdev
->flags
);
1919 set_bit(HCI_UP
, &hdev
->flags
);
1920 hci_notify(hdev
, HCI_DEV_UP
);
1921 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
1922 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
1923 hdev
->dev_type
== HCI_BREDR
) {
1925 mgmt_powered(hdev
, 1);
1926 hci_dev_unlock(hdev
);
1929 /* Init failed, cleanup */
1930 flush_work(&hdev
->tx_work
);
1931 flush_work(&hdev
->cmd_work
);
1932 flush_work(&hdev
->rx_work
);
1934 skb_queue_purge(&hdev
->cmd_q
);
1935 skb_queue_purge(&hdev
->rx_q
);
1940 if (hdev
->sent_cmd
) {
1941 kfree_skb(hdev
->sent_cmd
);
1942 hdev
->sent_cmd
= NULL
;
1950 hci_req_unlock(hdev
);
1954 /* ---- HCI ioctl helpers ---- */
1956 int hci_dev_open(__u16 dev
)
1958 struct hci_dev
*hdev
;
1961 hdev
= hci_dev_get(dev
);
1965 /* We need to ensure that no other power on/off work is pending
1966 * before proceeding to call hci_dev_do_open. This is
1967 * particularly important if the setup procedure has not yet
1970 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
1971 cancel_delayed_work(&hdev
->power_off
);
1973 /* After this call it is guaranteed that the setup procedure
1974 * has finished. This means that error conditions like RFKILL
1975 * or no valid public or static random address apply.
1977 flush_workqueue(hdev
->req_workqueue
);
1979 err
= hci_dev_do_open(hdev
);
1986 static int hci_dev_do_close(struct hci_dev
*hdev
)
1988 BT_DBG("%s %p", hdev
->name
, hdev
);
1990 cancel_delayed_work(&hdev
->power_off
);
1992 hci_req_cancel(hdev
, ENODEV
);
1995 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
1996 del_timer_sync(&hdev
->cmd_timer
);
1997 hci_req_unlock(hdev
);
2001 /* Flush RX and TX works */
2002 flush_work(&hdev
->tx_work
);
2003 flush_work(&hdev
->rx_work
);
2005 if (hdev
->discov_timeout
> 0) {
2006 cancel_delayed_work(&hdev
->discov_off
);
2007 hdev
->discov_timeout
= 0;
2008 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
2009 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
2012 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
2013 cancel_delayed_work(&hdev
->service_cache
);
2015 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
2018 hci_inquiry_cache_flush(hdev
);
2019 hci_conn_hash_flush(hdev
);
2020 hci_dev_unlock(hdev
);
2022 hci_notify(hdev
, HCI_DEV_DOWN
);
2028 skb_queue_purge(&hdev
->cmd_q
);
2029 atomic_set(&hdev
->cmd_cnt
, 1);
2030 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
2031 !test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
2032 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
2033 set_bit(HCI_INIT
, &hdev
->flags
);
2034 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
2035 clear_bit(HCI_INIT
, &hdev
->flags
);
2038 /* flush cmd work */
2039 flush_work(&hdev
->cmd_work
);
2042 skb_queue_purge(&hdev
->rx_q
);
2043 skb_queue_purge(&hdev
->cmd_q
);
2044 skb_queue_purge(&hdev
->raw_q
);
2046 /* Drop last sent command */
2047 if (hdev
->sent_cmd
) {
2048 del_timer_sync(&hdev
->cmd_timer
);
2049 kfree_skb(hdev
->sent_cmd
);
2050 hdev
->sent_cmd
= NULL
;
2053 kfree_skb(hdev
->recv_evt
);
2054 hdev
->recv_evt
= NULL
;
2056 /* After this point our queues are empty
2057 * and no tasks are scheduled. */
2062 hdev
->dev_flags
&= ~HCI_PERSISTENT_MASK
;
2064 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
2065 if (hdev
->dev_type
== HCI_BREDR
) {
2067 mgmt_powered(hdev
, 0);
2068 hci_dev_unlock(hdev
);
2072 /* Controller radio is available but is currently powered down */
2073 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
2075 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
2076 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
2078 hci_req_unlock(hdev
);
2084 int hci_dev_close(__u16 dev
)
2086 struct hci_dev
*hdev
;
2089 hdev
= hci_dev_get(dev
);
2093 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2098 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2099 cancel_delayed_work(&hdev
->power_off
);
2101 err
= hci_dev_do_close(hdev
);
2108 int hci_dev_reset(__u16 dev
)
2110 struct hci_dev
*hdev
;
2113 hdev
= hci_dev_get(dev
);
2119 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2124 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2130 skb_queue_purge(&hdev
->rx_q
);
2131 skb_queue_purge(&hdev
->cmd_q
);
2134 hci_inquiry_cache_flush(hdev
);
2135 hci_conn_hash_flush(hdev
);
2136 hci_dev_unlock(hdev
);
2141 atomic_set(&hdev
->cmd_cnt
, 1);
2142 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
2144 if (!test_bit(HCI_RAW
, &hdev
->flags
))
2145 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
2148 hci_req_unlock(hdev
);
2153 int hci_dev_reset_stat(__u16 dev
)
2155 struct hci_dev
*hdev
;
2158 hdev
= hci_dev_get(dev
);
2162 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2167 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
2174 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
2176 struct hci_dev
*hdev
;
2177 struct hci_dev_req dr
;
2180 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
2183 hdev
= hci_dev_get(dr
.dev_id
);
2187 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2192 if (hdev
->dev_type
!= HCI_BREDR
) {
2197 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
2204 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2209 if (!lmp_encrypt_capable(hdev
)) {
2214 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
2215 /* Auth must be enabled first */
2216 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2222 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
2227 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
2232 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
2236 case HCISETLINKMODE
:
2237 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
2238 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
2242 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
2246 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2247 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2251 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2252 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2265 int hci_get_dev_list(void __user
*arg
)
2267 struct hci_dev
*hdev
;
2268 struct hci_dev_list_req
*dl
;
2269 struct hci_dev_req
*dr
;
2270 int n
= 0, size
, err
;
2273 if (get_user(dev_num
, (__u16 __user
*) arg
))
2276 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
2279 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
2281 dl
= kzalloc(size
, GFP_KERNEL
);
2287 read_lock(&hci_dev_list_lock
);
2288 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
2289 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2290 cancel_delayed_work(&hdev
->power_off
);
2292 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2293 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
2295 (dr
+ n
)->dev_id
= hdev
->id
;
2296 (dr
+ n
)->dev_opt
= hdev
->flags
;
2301 read_unlock(&hci_dev_list_lock
);
2304 size
= sizeof(*dl
) + n
* sizeof(*dr
);
2306 err
= copy_to_user(arg
, dl
, size
);
2309 return err
? -EFAULT
: 0;
2312 int hci_get_dev_info(void __user
*arg
)
2314 struct hci_dev
*hdev
;
2315 struct hci_dev_info di
;
2318 if (copy_from_user(&di
, arg
, sizeof(di
)))
2321 hdev
= hci_dev_get(di
.dev_id
);
2325 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2326 cancel_delayed_work_sync(&hdev
->power_off
);
2328 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2329 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
2331 strcpy(di
.name
, hdev
->name
);
2332 di
.bdaddr
= hdev
->bdaddr
;
2333 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
2334 di
.flags
= hdev
->flags
;
2335 di
.pkt_type
= hdev
->pkt_type
;
2336 if (lmp_bredr_capable(hdev
)) {
2337 di
.acl_mtu
= hdev
->acl_mtu
;
2338 di
.acl_pkts
= hdev
->acl_pkts
;
2339 di
.sco_mtu
= hdev
->sco_mtu
;
2340 di
.sco_pkts
= hdev
->sco_pkts
;
2342 di
.acl_mtu
= hdev
->le_mtu
;
2343 di
.acl_pkts
= hdev
->le_pkts
;
2347 di
.link_policy
= hdev
->link_policy
;
2348 di
.link_mode
= hdev
->link_mode
;
2350 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
2351 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
2353 if (copy_to_user(arg
, &di
, sizeof(di
)))
2361 /* ---- Interface to HCI drivers ---- */
2363 static int hci_rfkill_set_block(void *data
, bool blocked
)
2365 struct hci_dev
*hdev
= data
;
2367 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
2369 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
2373 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2374 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
))
2375 hci_dev_do_close(hdev
);
2377 clear_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2383 static const struct rfkill_ops hci_rfkill_ops
= {
2384 .set_block
= hci_rfkill_set_block
,
2387 static void hci_power_on(struct work_struct
*work
)
2389 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
2392 BT_DBG("%s", hdev
->name
);
2394 err
= hci_dev_do_open(hdev
);
2396 mgmt_set_powered_failed(hdev
, err
);
2400 /* During the HCI setup phase, a few error conditions are
2401 * ignored and they need to be checked now. If they are still
2402 * valid, it is important to turn the device back off.
2404 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
) ||
2405 (hdev
->dev_type
== HCI_BREDR
&&
2406 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2407 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2408 clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
2409 hci_dev_do_close(hdev
);
2410 } else if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
2411 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
2412 HCI_AUTO_OFF_TIMEOUT
);
2415 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
2416 mgmt_index_added(hdev
);
2419 static void hci_power_off(struct work_struct
*work
)
2421 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2424 BT_DBG("%s", hdev
->name
);
2426 hci_dev_do_close(hdev
);
2429 static void hci_discov_off(struct work_struct
*work
)
2431 struct hci_dev
*hdev
;
2433 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
2435 BT_DBG("%s", hdev
->name
);
2437 mgmt_discoverable_timeout(hdev
);
2440 int hci_uuids_clear(struct hci_dev
*hdev
)
2442 struct bt_uuid
*uuid
, *tmp
;
2444 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2445 list_del(&uuid
->list
);
2452 int hci_link_keys_clear(struct hci_dev
*hdev
)
2454 struct list_head
*p
, *n
;
2456 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
2457 struct link_key
*key
;
2459 key
= list_entry(p
, struct link_key
, list
);
2468 int hci_smp_ltks_clear(struct hci_dev
*hdev
)
2470 struct smp_ltk
*k
, *tmp
;
2472 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
2480 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2484 list_for_each_entry(k
, &hdev
->link_keys
, list
)
2485 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
2491 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2492 u8 key_type
, u8 old_key_type
)
2495 if (key_type
< 0x03)
2498 /* Debug keys are insecure so don't store them persistently */
2499 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
2502 /* Changed combination key and there's no previous one */
2503 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
2506 /* Security mode 3 case */
2510 /* Neither local nor remote side had no-bonding as requirement */
2511 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
2514 /* Local side had dedicated bonding as requirement */
2515 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
2518 /* Remote side had dedicated bonding as requirement */
2519 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
2522 /* If none of the above criteria match, then don't store the key
2527 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8])
2531 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
2532 if (k
->ediv
!= ediv
||
2533 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
2542 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2547 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
2548 if (addr_type
== k
->bdaddr_type
&&
2549 bacmp(bdaddr
, &k
->bdaddr
) == 0)
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
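/* Illustrative sketch (not taken from this file): the usual caller of
 * hci_add_link_key() is the HCI event code, when the controller reports a
 * new link key.  Assuming a parsed Link Key Notification event in ev and a
 * previously looked-up conn, the call would look roughly like:
 *
 *	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, conn ? conn->pin_length : 0);
 *
 * Passing new_key == 1 makes the function notify the management interface
 * and use hci_persistent_key() to decide whether user space should keep
 * the key across reboots.
 */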
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}
int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
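/* Illustrative sketch (not taken from this file): the management interface
 * is the usual caller of the blacklist helpers.  Blocking and later
 * unblocking a peer under hdev->lock would look roughly like this, assuming
 * bdaddr and addr_type come from the mgmt command:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, addr_type);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, addr_type);
 *	hci_dev_unlock(hdev);
 *
 * Passing BDADDR_ANY to hci_blacklist_del() clears the whole list instead
 * of removing a single entry.
 */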
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
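/* Illustrative sketch (not taken from this file): a transport driver brings
 * a new controller up by allocating an hci_dev, filling in its callbacks
 * and bus type, and registering it.  The my_*() callbacks below are
 * hypothetical placeholders for the driver's own functions:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * On success the core names the device hciN, creates its workqueues and
 * schedules the power_on work seen earlier in this file.
 */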
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
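/* Illustrative sketch (not taken from this file): a driver that already has
 * a complete HCI packet in an skb tags its type and hands it to the core,
 * for example for an event received from the hardware:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 *
 * hci_recv_frame() only queues the skb on hdev->rx_q; the actual parsing
 * happens later in hci_rx_work().
 */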
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
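/* Illustrative sketch (not taken from this file): drivers that receive a
 * packet of known type in arbitrary chunks can feed each chunk to
 * hci_recv_fragment() and let the core reassemble it, e.g. for ACL data
 * arriving in a hypothetical bus buffer buf of length len:
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 *
 * A complete frame is pushed up automatically via hci_recv_frame() once
 * the expected length has been seen.
 */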
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
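/* Illustrative sketch (not taken from this file): UART-style drivers that
 * see a raw byte stream (packet type indicator followed by the packet) can
 * pass whatever arrived on the wire straight to the core, e.g. from a
 * hypothetical receive callback with buffer data of length count:
 *
 *	err = hci_recv_stream_fragment(hdev, data, count);
 *	if (err < 0)
 *		BT_ERR("Corrupted HCI stream (%d)", err);
 *
 * The first byte of each frame selects the reassembly rules applied by
 * hci_reassembly() above.
 */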
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
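/* Illustrative sketch (not taken from this file): an upper protocol hooks
 * into connection events by registering a struct hci_cb at module init
 * time.  The callback members shown are assumptions based on how the
 * hci_cb list is used elsewhere in the stack:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_proto_connect_cfm,
 *		.disconn_cfm	= my_proto_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */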
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
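/* Illustrative sketch (not taken from this file): a fire-and-forget command
 * is sent by passing the opcode and a parameter buffer to hci_send_cmd(),
 * e.g. enabling page and inquiry scan:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is marked as a single-command request and serialized through
 * hdev->cmd_q by hci_cmd_work() below.
 */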
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
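/* Illustrative sketch (not taken from this file): several commands can be
 * batched into one asynchronous request and completed with a single
 * callback, following the same pattern as le_scan_disable_work() above.
 * my_complete() is a hypothetical hci_req_complete_t callback:
 *
 *	struct hci_request req;
 *	__u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete);
 *
 * hci_req_run() refuses empty requests and returns any error recorded by
 * hci_req_add_ev() while the request was being built.
 */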
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);

	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
3712 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
3713 struct hci_chan
*chan
= NULL
;
3714 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
3715 struct hci_conn
*conn
;
3716 int cnt
, q
, conn_num
= 0;
3718 BT_DBG("%s", hdev
->name
);
3722 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
3723 struct hci_chan
*tmp
;
3725 if (conn
->type
!= type
)
3728 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
3733 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
3734 struct sk_buff
*skb
;
3736 if (skb_queue_empty(&tmp
->data_q
))
3739 skb
= skb_peek(&tmp
->data_q
);
3740 if (skb
->priority
< cur_prio
)
3743 if (skb
->priority
> cur_prio
) {
3746 cur_prio
= skb
->priority
;
3751 if (conn
->sent
< min
) {
3757 if (hci_conn_num(hdev
, type
) == conn_num
)
3766 switch (chan
->conn
->type
) {
3768 cnt
= hdev
->acl_cnt
;
3771 cnt
= hdev
->block_cnt
;
3775 cnt
= hdev
->sco_cnt
;
3778 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
3782 BT_ERR("Unknown link type");
3787 BT_DBG("chan %p quote %d", chan
, *quote
);
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
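/* Worked example: with hdev->block_len == 16, a 4-byte ACL header plus a
 * 27-byte payload gives skb->len == 31, so the packet consumes
 * DIV_ROUND_UP(31 - 4, 16) == 2 controller buffer blocks.
 */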
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}