/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};
struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

struct hci_dev {
	struct list_head	list;
	spinlock_t		lock;
	atomic_t		refcnt;

	unsigned long		flags;
	__u8			features[8];

	__u16			sniff_min_interval;
	__u16			sniff_max_interval;

	unsigned int		acl_mtu;
	unsigned int		sco_mtu;
	unsigned int		acl_pkts;
	unsigned int		sco_pkts;

	unsigned long		cmd_last_tx;
	unsigned long		acl_last_tx;
	unsigned long		sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[3];

	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	struct device		*parent;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
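
/*
 * The function pointers above are supplied by HCI transport drivers.
 * A minimal registration sketch (illustrative only; the mydrv_* callbacks
 * are hypothetical and not part of this header):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->owner = THIS_MODULE;
 *	hdev->open  = mydrv_open;
 *	hdev->close = mydrv_close;
 *	hdev->flush = mydrv_flush;
 *	hdev->send  = mydrv_send;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */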

struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8		 type;
	__u8		 sec_level;
	__u32		 link_mode;
	unsigned long	 pend;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct hci_dev	*hdev;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)		// 30 seconds
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)		// 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
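
/*
 * Usage sketch (illustrative only, not part of this header): the HCI event
 * code is expected to refresh the cache for every inquiry result under the
 * device lock, and to compare inquiry_cache_age()/inquiry_entry_age()
 * against INQUIRY_CACHE_AGE_MAX/INQUIRY_ENTRY_AGE_MAX before trusting
 * cached results:
 *
 *	struct inquiry_data data;
 *
 *	// ... fill "data" from an inquiry result event ...
 *	hci_dev_lock(hdev);
 *	hci_inquiry_cache_update(hdev, &data);
 *	hci_dev_unlock(hdev);
 */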

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
								__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
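
/*
 * Lookup sketch (illustrative): callers resolve the hci_conn behind an ACL
 * handle while holding the device lock, and take a reference before the
 * lock is dropped:
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_handle(hdev, handle);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */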

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
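
/*
 * Reference counting sketch (illustrative): hci_conn_hold()/hci_conn_put()
 * bracket any use of a connection outside the lookup path.  Dropping the
 * last reference does not free the connection; it only (re)arms disc_timer
 * so the disconnect can be issued later from timer context:
 *
 *	hci_conn_hold(conn);
 *	// ... queue data, change mode, etc. ...
 *	hci_conn_put(conn);
 */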

/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}
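
/*
 * Sketch (illustrative): hci_dev_hold() bumps the refcount and pins the
 * driver module, so every successful hold must be balanced by
 * hci_dev_put():
 *
 *	if (hci_dev_hold(hdev)) {
 *		// ... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */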

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
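
/*
 * Driver-side sketch (illustrative; the packet-type constant and the
 * bt_cb() control block come from the Bluetooth headers and are assumed
 * here): a transport driver hands every received packet to the core after
 * tagging it with its hci_dev and packet type:
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 *
 * hci_recv_fragment() below serves transports that deliver partial packets
 * and need reassembly.
 */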

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
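
/*
 * Example (illustrative): the feature-bit checks gate optional LMP
 * procedures; callers test them before issuing the related commands:
 *
 *	if (lmp_esco_capable(hdev))
 *		hci_setup_sync(conn, handle);	// eSCO-capable controller
 *	else
 *		hci_add_sco(conn, handle);	// fall back to legacy SCO
 */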

/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;	/* default: remote user terminated connection */

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
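
/*
 * Registration sketch (illustrative; the my_* handlers are hypothetical):
 * an upper protocol fills in a struct hci_proto and registers it into the
 * hci_proto[] slot named by its id:
 *
 *	static struct hci_proto my_proto = {
 *		.name        = "MYPROTO",
 *		.id          = HCI_PROTO_L2CAP,
 *		.connect_ind = my_connect_ind,
 *		.connect_cfm = my_connect_cfm,
 *		.disconn_ind = my_disconn_ind,
 *		.disconn_cfm = my_disconn_cfm,
 *	};
 *
 *	err = hci_register_proto(&my_proto);
 *	...
 *	hci_unregister_proto(&my_proto);
 */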

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
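
/*
 * Note: the hci_*_cfm() helpers above and below fan each event out in two
 * stages: first to the fixed hci_proto[] slots (L2CAP, SCO), then to every
 * registered struct hci_cb on hci_cb_list under hci_cb_list_lock.
 */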

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
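
/*
 * Callback registration sketch (illustrative; my_security_cfm is
 * hypothetical): higher layers that only need notifications, not packet
 * delivery, register a struct hci_cb instead of a struct hci_proto:
 *
 *	static struct hci_cb my_cb = {
 *		.name         = "my_cb",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */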

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
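
/*
 * Command sketch (illustrative; the HCI_OP_* opcode constant is assumed to
 * come from <net/bluetooth/hci.h>): hci_send_cmd() hands the command to
 * the cmd_task path, e.g. to enable page and inquiry scan:
 *
 *	__u8 scan = 0x03;	// inquiry scan | page scan
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *
 * The parameters of the command currently in flight can be read back with
 * hci_sent_cmd_data(hdev, opcode).
 */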

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		down(&d->req_lock)
#define hci_req_unlock(d)	up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */