/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
};
#define NUM_REASSEMBLY 4
struct hci_dev {
	struct list_head list;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct workqueue_struct	*workqueue;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[NUM_REASSEMBLY];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;
	struct list_head	blacklist;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	struct dentry		*debugfs;

	struct device		*parent;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

struct hci_conn {
	struct list_head list;
	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct hci_dev	*hdev;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	// 30 seconds
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	// 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}
static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
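/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that already holds hdev could use the inquiry cache to decide whether a
 * remote device was seen in a recent inquiry. "hdev" and "bdaddr" are
 * assumed to be supplied by the caller.
 *
 *	struct inquiry_entry *ie;
 *	int fresh = 0;
 *
 *	hci_dev_lock(hdev);
 *	ie = hci_inquiry_cache_lookup(hdev, &bdaddr);
 *	if (ie && inquiry_entry_age(ie) < INQUIRY_ENTRY_AGE_MAX)
 *		fresh = 1;
 *	hci_dev_unlock(hdev);
 */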
/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
							__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
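/*
 * Example (illustrative sketch, not part of the original header): the hash
 * lookup helpers above walk hdev->conn_hash, so a typical caller takes the
 * device lock, finds the connection and grabs a reference before using it.
 * "hdev" and "dst" are assumed to come from the caller.
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dst);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */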
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else {
				timeo = msecs_to_jiffies(10);
			}
		} else {
			timeo = msecs_to_jiffies(10);
		}
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
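/*
 * Example (illustrative sketch, not part of the original header): every use
 * of a connection is bracketed by the hold/put pair so that the disconnect
 * timer re-armed by hci_conn_put() cannot fire while the connection is
 * still in use. "conn" is assumed to be provided by the caller.
 *
 *	hci_conn_hold(conn);
 *	... transmit on or otherwise use the connection ...
 *	hci_conn_put(conn);
 */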
/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}
#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
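/*
 * Example (illustrative sketch, not part of the original header):
 * hci_dev_get() returns the device with a reference held, so it has to be
 * balanced with hci_dev_put() once the caller is done with it.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		printk(KERN_INFO "hci0 ACL MTU %u\n", hdev->acl_mtu);
 *		hci_dev_put(hdev);
 *	}
 */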
struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
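/*
 * Example (illustrative sketch, not part of the original header): a transport
 * driver allocates an hci_dev, points it at its parent device with
 * SET_HCIDEV_DEV, fills in its callbacks and registers it. The my_* handlers
 * and the "intf" USB interface are hypothetical.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	SET_HCIDEV_DEV(hdev, &intf->dev);
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */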
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
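/*
 * Example (illustrative sketch, not part of the original header): the LMP
 * feature macros gate optional behaviour on what the controller advertises,
 * e.g. only asking for sniff mode when the feature bit is present. "hdev"
 * and "conn" are assumed to come from the caller.
 *
 *	if (lmp_sniff_capable(hdev))
 *		hci_conn_enter_sniff_mode(conn);
 */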
/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}
static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
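/*
 * Example (illustrative sketch, not part of the original header): an upper
 * protocol fills in a struct hci_proto and registers it so the core can
 * dispatch connection events to it; only the HCI_PROTO_L2CAP and
 * HCI_PROTO_SCO slots defined at the top of this file exist. The my_*
 * handlers below are hypothetical.
 *
 *	static struct hci_proto my_proto = {
 *		.name        = "MYPROTO",
 *		.id          = HCI_PROTO_L2CAP,
 *		.connect_ind = my_connect_ind,
 *		.connect_cfm = my_connect_cfm,
 *		.disconn_ind = my_disconn_ind,
 *	};
 *
 *	hci_register_proto(&my_proto);
 *	...
 *	hci_unregister_proto(&my_proto);
 */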
/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
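/*
 * Example (illustrative sketch, not part of the original header): a module
 * interested in security and role events registers a struct hci_cb; its
 * hooks are then invoked from hci_auth_cfm(), hci_encrypt_cfm() and
 * hci_role_switch_cfm() above. The my_security_cfm handler is hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name         = "my_cb",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */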
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
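/*
 * Example (illustrative sketch, not part of the original header): queueing a
 * command to the controller with hci_send_cmd(). The opcode and scan
 * constants are assumed to come from <net/bluetooth/hci.h>; hdev is an
 * opened device supplied by the caller.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */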
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
};
/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */