include/net/bluetooth/hci_core.h
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1
/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};
struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};
struct hci_conn_hash {
	struct list_head	list;
	spinlock_t		lock;
	unsigned int		acl_num;
	unsigned int		sco_num;
};
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	__u8		dev_name[248];
	__u8		dev_class[3];
	__u8		features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct workqueue_struct	*workqueue;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[3];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		*parent;
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;
	spinlock_t	lock;

	bdaddr_t	dst;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[8];
	__u8		ssp_mode;
	__u16		interval;
	__u16		pkt_type;
	__u16		link_policy;
	__u32		link_mode;
	__u8		auth_type;
	__u8		sec_level;
	__u8		power_save;
	__u16		disc_timeout;
	unsigned long	pend;

	unsigned int	sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct device	dev;
	atomic_t	devref;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};
extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)		// 30 seconds
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)		// 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
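/*
 * Illustrative sketch (not part of the API): how a caller that already
 * holds a valid hdev might consult the inquiry cache for a recently seen
 * remote device. The bdaddr and pscan_rep_mode variables are hypothetical.
 *
 *	struct inquiry_entry *ie;
 *
 *	hci_dev_lock(hdev);
 *	ie = hci_inquiry_cache_lookup(hdev, &bdaddr);
 *	if (ie && inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX)
 *		pscan_rep_mode = ie->data.pscan_rep_mode;
 *	hci_dev_unlock(hdev);
 */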
/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};
static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
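/*
 * Illustrative sketch (not part of the API): event handlers typically look
 * up a connection by HCI handle under the device lock and take a reference
 * before using it outside the lock. The handle variable is hypothetical.
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_handle(hdev, handle);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */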
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
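/*
 * Illustrative sketch (not part of the API): the usual hold/put pairing.
 * A caller that hands the connection to asynchronous work takes a
 * reference first; dropping the last reference arms the disconnect timer
 * rather than tearing the connection down immediately.
 *
 *	hci_conn_hold(conn);
 *	... use conn, possibly sleeping or queueing work ...
 *	hci_conn_put(conn);
 */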
/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}
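/*
 * Illustrative sketch (not part of the API): hci_dev_get() returns a held
 * device (or NULL), so every successful get is balanced by hci_dev_put().
 * The index 0 here is just an example value.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */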
#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
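/*
 * Illustrative sketch (not part of the API): the typical driver probe
 * sequence, loosely modelled on existing in-tree HCI drivers. The my_dev
 * pointer and my_* callbacks are hypothetical driver code, and the bus
 * constant is assumed to come from hci.h.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->driver_data = my_dev;
 *	SET_HCIDEV_DEV(hdev, &my_dev->dev);
 *
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->flush    = my_flush;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 */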
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
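/*
 * Illustrative sketch (not part of the API): these macros work on anything
 * carrying a features[] array (hci_dev and hci_conn both do), e.g. falling
 * back to plain SCO setup when the controller lacks eSCO support.
 *
 *	if (lmp_esco_capable(hdev))
 *		hci_setup_sync(conn, handle);
 *	else
 *		hci_add_sco(conn, handle);
 */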
/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
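/*
 * Illustrative sketch (not part of the API): how an upper protocol wires
 * itself into the hci_proto[] slots, in the style of the in-tree L2CAP and
 * SCO layers. The my_* callbacks are hypothetical.
 *
 *	static struct hci_proto my_proto = {
 *		.name		= "MYPROTO",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= my_connect_ind,
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_ind	= my_disconn_ind,
 *		.recv_acldata	= my_recv_acldata,
 *	};
 *
 *	err = hci_register_proto(&my_proto);
 *	...
 *	hci_unregister_proto(&my_proto);
 */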
/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
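/*
 * Illustrative sketch (not part of the API): a higher layer such as RFCOMM
 * registers an hci_cb to hear about security and role changes on existing
 * connections. The my_security_cfm callback is hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "MYCB",
 *		.security_cfm	= my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */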
int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
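/*
 * Illustrative sketch (not part of the API): queueing an HCI command from
 * core code, here a Write Scan Enable with a one-byte parameter. The
 * HCI_OP_WRITE_SCAN_ENABLE, SCAN_PAGE and SCAN_INQUIRY constants are
 * assumed to come from hci.h.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */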
/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock		bt;
	struct hci_dev		*hdev;
	struct hci_filter	filter;
	__u32			cmsg_mask;
};
/* HCI security filter */
#define HCI_SFLT_MAX_OGF	5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* ----- HCI requests ----- */
#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);
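/*
 * Illustrative sketch (not part of the API): req_lock serialises blocking
 * request sequences against a device, and hci_req_complete() is what the
 * event path uses to post a result and wake the waiter on req_wait_q.
 *
 *	hci_req_lock(hdev);
 *	... issue commands and wait on req_wait_q for HCI_REQ_DONE ...
 *	hci_req_unlock(hdev);
 */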
#endif /* __HCI_CORE_H */