/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};
struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};
struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	unsigned long	flags;

	__u8		features[8];

	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[3];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	struct device		*parent;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
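/*
 * Illustrative sketch (hypothetical driver, not taken from this file): a
 * transport driver fills in the callbacks above before registering the
 * device.  The myhci_* functions are placeholders that must match the
 * prototypes declared in struct hci_dev.
 *
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = myhci_open;
 *	hdev->close    = myhci_close;
 *	hdev->flush    = myhci_flush;
 *	hdev->send     = myhci_send_frame;
 *	hdev->destruct = myhci_destruct;
 */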
struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8		 type;
	__u8		 out;
	__u8		 sec_level;
	__u32		 link_mode;
	unsigned long	 pend;
	__u16		 disc_timeout;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct hci_dev	*hdev;

	struct hci_conn	*link;
};
extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)		// 30 seconds
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)		// 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}
static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}
static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
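/*
 * Illustrative sketch (not part of this header): a caller that wants a
 * reasonably fresh cache entry would serialize against updates with the
 * cache lock and check the entry age, roughly as follows.  The local
 * variables bdaddr and data are hypothetical.
 *
 *	struct inquiry_cache *cache = &hdev->inq_cache;
 *	struct inquiry_entry *e;
 *
 *	inquiry_cache_lock_bh(cache);
 *	e = hci_inquiry_cache_lookup(hdev, &bdaddr);
 *	if (e && inquiry_entry_age(e) <= INQUIRY_ENTRY_AGE_MAX)
 *		memcpy(&data, &e->data, sizeof(data));
 *	inquiry_cache_unlock_bh(cache);
 */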
/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};
static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
							__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
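/*
 * Illustrative sketch (hypothetical upper-layer caller): an ACL link is
 * typically obtained with hci_connect() under the device lock and released
 * with hci_conn_put() when no longer needed.  "dst" and the security
 * parameters are placeholders; HCI_AT_NO_BONDING is assumed to come from
 * <net/bluetooth/hci.h>.
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock_bh(hdev);
 *	conn = hci_connect(hdev, ACL_LINK, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
 *	hci_dev_unlock_bh(hdev);
 *	if (!conn)
 *		return -ENOMEM;
 *	...
 *	hci_conn_put(conn);
 */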
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
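/*
 * Illustrative sketch: hci_conn_hold() and hci_conn_put() must be balanced.
 * Dropping the last reference does not free the connection immediately; it
 * (re)arms disc_timer, and the disconnect is carried out later from that
 * timer unless the connection is reacquired in the meantime.
 *
 *	hci_conn_hold(conn);
 *	...			(connection is guaranteed to stay around)
 *	hci_conn_put(conn);
 */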
/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}
/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}
#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)
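/*
 * Illustrative sketch (hypothetical caller): pin the device and its module
 * before touching it, and take the device lock around state changes.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);	(hci0, if registered)
 *
 *	if (hdev) {
 *		hci_dev_lock_bh(hdev);
 *		...			(inspect or modify hdev state)
 *		hci_dev_unlock_bh(hdev);
 *		hci_dev_put(hdev);
 *	}
 */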
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
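/*
 * Illustrative sketch (hypothetical driver probe/remove): typical device
 * lifecycle, assuming the driver callbacks were set up as shown earlier.
 * "&intf->dev" stands for the transport's struct device and is a placeholder.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	SET_HCIDEV_DEV(hdev, &intf->dev);
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */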
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
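/*
 * Illustrative sketch (hypothetical driver RX path): a driver that has read
 * a complete HCI packet tags the skb and hands it to the core; transports
 * that only see byte streams can feed hci_recv_fragment() instead.
 * HCI_EVENT_PKT and bt_cb(skb)->pkt_type are assumed to come from
 * <net/bluetooth/hci.h> and <net/bluetooth/bluetooth.h>.
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */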
int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
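/*
 * Illustrative sketch: feature bits gate optional behaviour.  For example,
 * a caller would only request sniff mode on a controller that advertises it:
 *
 *	if (lmp_sniff_capable(hdev))
 *		hci_conn_enter_sniff_mode(conn);
 */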
/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;	/* default: remote user terminated connection */

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}
static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
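/*
 * Illustrative sketch (hypothetical protocol module): an upper protocol
 * provides a struct hci_proto and registers it at module init.  The
 * sample_* names are placeholders.
 *
 *	static struct hci_proto sample_hci_proto = {
 *		.name		= "SAMPLE",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= sample_connect_ind,
 *		.connect_cfm	= sample_connect_cfm,
 *		.disconn_ind	= sample_disconn_ind,
 *		.disconn_cfm	= sample_disconn_cfm,
 *		.recv_acldata	= sample_recv_acldata,
 *		.security_cfm	= sample_security_cfm,
 *	};
 *
 *	err = hci_register_proto(&sample_hci_proto);
 *	...
 *	hci_unregister_proto(&sample_hci_proto);
 */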
/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
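/*
 * Illustrative sketch (hypothetical listener): layers interested in
 * security, key-change or role-switch events register a struct hci_cb.
 * The sample_* names are placeholders.
 *
 *	static struct hci_cb sample_cb = {
 *		.name		= "SAMPLE",
 *		.security_cfm	= sample_security_cfm,
 *	};
 *
 *	hci_register_cb(&sample_cb);
 *	...
 *	hci_unregister_cb(&sample_cb);
 */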
int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
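/*
 * Illustrative sketch: commands are queued to the command task with
 * hci_send_cmd(); the parameter structure and opcode (here assumed to be
 * struct hci_cp_auth_requested and HCI_OP_AUTH_REQUESTED from
 * <net/bluetooth/hci.h>) describe the command payload.
 *
 *	struct hci_cp_auth_requested cp;
 *
 *	cp.handle = cpu_to_le16(conn->handle);
 *	hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
 */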
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cflags;
};
/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);
#endif /* __HCI_CORE_H */