include/net/bluetooth/hci_core.h

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP 0
#define HCI_PROTO_SCO   1

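/* These indices select the two slots of the hci_proto[] table declared
 * further down; the inline hci_proto_* dispatch helpers walk exactly
 * these entries when fanning events out to L2CAP and SCO. */
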
/* HCI Core structures */
struct inquiry_data {
        bdaddr_t        bdaddr;
        __u8            pscan_rep_mode;
        __u8            pscan_period_mode;
        __u8            pscan_mode;
        __u8            dev_class[3];
        __le16          clock_offset;
        __s8            rssi;
        __u8            ssp_mode;
};

struct inquiry_entry {
        struct inquiry_entry    *next;
        __u32                   timestamp;
        struct inquiry_data     data;
};

struct inquiry_cache {
        spinlock_t              lock;
        __u32                   timestamp;
        struct inquiry_entry    *list;
};

struct hci_conn_hash {
        struct list_head list;
        spinlock_t       lock;
        unsigned int     acl_num;
        unsigned int     sco_num;
};

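/* struct hci_dev holds the per-adapter state: identity and feature data
 * reported by the controller, flow-control counters, the cmd/rx/tx
 * tasklets with their packet queues, and the transport callbacks that a
 * driver fills in before registering the device. */
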
struct hci_dev {
        struct list_head list;
        spinlock_t      lock;
        atomic_t        refcnt;

        char            name[8];
        unsigned long   flags;
        __u16           id;
        __u8            type;
        bdaddr_t        bdaddr;
        __u8            dev_name[248];
        __u8            dev_class[3];
        __u8            features[8];
        __u8            commands[64];
        __u8            ssp_mode;
        __u8            hci_ver;
        __u16           hci_rev;
        __u16           manufacturer;
        __u16           voice_setting;

        __u16           pkt_type;
        __u16           esco_type;
        __u16           link_policy;
        __u16           link_mode;

        __u32           idle_timeout;
        __u16           sniff_min_interval;
        __u16           sniff_max_interval;

        unsigned long   quirks;

        atomic_t        cmd_cnt;
        unsigned int    acl_cnt;
        unsigned int    sco_cnt;

        unsigned int    acl_mtu;
        unsigned int    sco_mtu;
        unsigned int    acl_pkts;
        unsigned int    sco_pkts;

        unsigned long   cmd_last_tx;
        unsigned long   acl_last_tx;
        unsigned long   sco_last_tx;

        struct tasklet_struct   cmd_task;
        struct tasklet_struct   rx_task;
        struct tasklet_struct   tx_task;

        struct sk_buff_head     rx_q;
        struct sk_buff_head     raw_q;
        struct sk_buff_head     cmd_q;

        struct sk_buff          *sent_cmd;
        struct sk_buff          *reassembly[3];

        struct semaphore        req_lock;
        wait_queue_head_t       req_wait_q;
        __u32                   req_status;
        __u32                   req_result;

        struct inquiry_cache    inq_cache;
        struct hci_conn_hash    conn_hash;

        struct hci_dev_stats    stat;

        struct sk_buff_head     driver_init;

        void                    *driver_data;
        void                    *core_data;

        atomic_t                promisc;

        struct device           *parent;
        struct device           dev;

        struct rfkill           *rfkill;

        struct module           *owner;

        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
        int (*send)(struct sk_buff *skb);
        void (*destruct)(struct hci_dev *hdev);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
        int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

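/*
 * A rough sketch (not part of this header) of how a transport driver is
 * expected to wire itself up: allocate an hci_dev, fill in the callbacks
 * above, then register it.  The my_* names and the HCI_VIRTUAL type are
 * placeholders; error handling is trimmed.
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->type        = HCI_VIRTUAL;
 *      hdev->owner       = THIS_MODULE;
 *      hdev->open        = my_open;
 *      hdev->close       = my_close;
 *      hdev->flush       = my_flush;
 *      hdev->send        = my_send;
 *      hdev->destruct    = my_destruct;
 *      hdev->driver_data = my_priv;
 *      SET_HCIDEV_DEV(hdev, &my_device->dev);
 *
 *      if (hci_register_dev(hdev) < 0) {
 *              hci_free_dev(hdev);
 *              return -EBUSY;
 *      }
 *
 * Teardown is the mirror image: hci_unregister_dev() followed by
 * hci_free_dev() once the core has dropped its references.
 */
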
struct hci_conn {
        struct list_head list;

        atomic_t         refcnt;
        spinlock_t       lock;

        bdaddr_t         dst;
        __u16            handle;
        __u16            state;
        __u8             mode;
        __u8             type;
        __u8             out;
        __u8             attempt;
        __u8             dev_class[3];
        __u8             features[8];
        __u8             ssp_mode;
        __u16            interval;
        __u16            pkt_type;
        __u16            link_policy;
        __u32            link_mode;
        __u8             auth_type;
        __u8             sec_level;
        __u8             power_save;
        __u16            disc_timeout;
        unsigned long    pend;

        unsigned int     sent;

        struct sk_buff_head data_q;

        struct timer_list disc_timer;
        struct timer_list idle_timer;

        struct work_struct work_add;
        struct work_struct work_del;

        struct device    dev;

        struct hci_dev  *hdev;
        void            *l2cap_data;
        void            *sco_data;
        void            *priv;

        struct hci_conn *link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30) /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60) /* 60 seconds */

#define inquiry_cache_lock(c)           spin_lock(&c->lock)
#define inquiry_cache_unlock(c)         spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)        spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)      spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
        struct inquiry_cache *c = &hdev->inq_cache;
        spin_lock_init(&c->lock);
        c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
        struct inquiry_cache *c = &hdev->inq_cache;
        return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
        struct inquiry_cache *c = &hdev->inq_cache;
        return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
        return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
enum {
        HCI_CONN_AUTH_PEND,
        HCI_CONN_ENCRYPT_PEND,
        HCI_CONN_RSWITCH_PEND,
        HCI_CONN_MODE_CHANGE_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        INIT_LIST_HEAD(&h->list);
        spin_lock_init(&h->lock);
        h->acl_num = 0;
        h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_add(&c->list, &h->list);
        if (c->type == ACL_LINK)
                h->acl_num++;
        else
                h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        list_del(&c->list);
        if (c->type == ACL_LINK)
                h->acl_num--;
        else
                h->sco_num--;
}

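/* The lookup helpers below do a linear walk of the connection list and
 * return the first match without taking a reference; callers are
 * expected to hold the appropriate hdev locking (typically
 * hci_dev_lock/hci_dev_lock_bh, or tasklet context) while the result is
 * in use. */
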
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
                                        __u16 handle)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->handle == handle)
                        return c;
        }
        return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
                                        __u8 type, bdaddr_t *ba)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == type && !bacmp(&c->dst, ba))
                        return c;
        }
        return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
                                        __u8 type, __u16 state)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == type && c->state == state)
                        return c;
        }
        return NULL;
}

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
        atomic_inc(&conn->refcnt);
        del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->refcnt)) {
                unsigned long timeo;
                if (conn->type == ACL_LINK) {
                        del_timer(&conn->idle_timer);
                        if (conn->state == BT_CONNECTED) {
                                timeo = msecs_to_jiffies(conn->disc_timeout);
                                if (!conn->out)
                                        timeo *= 2;
                        } else
                                timeo = msecs_to_jiffies(10);
                } else
                        timeo = msecs_to_jiffies(10);
                mod_timer(&conn->disc_timer, jiffies + timeo);
        }
}

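/*
 * Reference handling in a nutshell: hci_conn_hold() bumps the refcount
 * and cancels the pending disconnect timer, while hci_conn_put() drops
 * the count and re-arms disc_timer once it reaches zero (with a longer
 * timeout for established incoming ACL links).  A minimal usage sketch,
 * assuming the handle comes from an HCI event:
 *
 *      struct hci_conn *conn = hci_conn_hash_lookup_handle(hdev, handle);
 *      if (conn) {
 *              hci_conn_hold(conn);
 *              ...                     use the connection
 *              hci_conn_put(conn);
 *      }
 */
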
/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
        tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
        if (atomic_dec_and_test(&d->refcnt))
                d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
        __hci_dev_put(d);
        module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
        atomic_inc(&d->refcnt);
        return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
        if (try_module_get(d->owner))
                return __hci_dev_hold(d);
        return NULL;
}

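/* hci_dev_hold() pins both the driver module and the device refcount and
 * returns NULL if the owning module is already going away; hci_dev_put()
 * undoes both, invoking the driver's destruct callback when the last
 * reference is dropped. */
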
#define hci_dev_lock(d)         spin_lock(&d->lock)
#define hci_dev_unlock(d)       spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)      spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)    spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        hci_sched_rx(hdev);
        return 0;
}

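/*
 * A hedged sketch of the driver side of hci_recv_frame(): before queuing
 * a received frame, a driver points skb->dev at its hci_dev and records
 * the HCI packet type in the control buffer.  The allocation size, buf
 * and the HCI_EVENT_PKT type are illustrative only.
 *
 *      struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      memcpy(skb_put(skb, len), buf, len);
 *      skb->dev = (void *) hdev;
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      hci_recv_frame(skb);
 */
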
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)

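/* These macros test individual LMP feature bits (masks defined in hci.h)
 * against the features[] block reported by the controller via Read Local
 * Supported Features; struct hci_conn carries an equivalent copy for the
 * remote device. */
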
/* ----- HCI protocols ----- */
struct hci_proto {
        char            *name;
        unsigned int    id;
        unsigned long   flags;

        void            *priv;

        int (*connect_ind)      (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
        int (*connect_cfm)      (struct hci_conn *conn, __u8 status);
        int (*disconn_ind)      (struct hci_conn *conn);
        int (*disconn_cfm)      (struct hci_conn *conn, __u8 reason);
        int (*recv_acldata)     (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
        int (*recv_scodata)     (struct hci_conn *conn, struct sk_buff *skb);
        int (*security_cfm)     (struct hci_conn *conn, __u8 status, __u8 encrypt);
};

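/* The inline hci_proto_* helpers below fan each event out to whichever of
 * the two upper protocols (L2CAP and SCO) has registered a handler via
 * hci_register_proto(); a NULL slot or a NULL callback is simply
 * skipped. */
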
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
        register struct hci_proto *hp;
        int mask = 0;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->connect_ind)
                mask |= hp->connect_ind(hdev, bdaddr, type);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_ind)
                mask |= hp->connect_ind(hdev, bdaddr, type);

        return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->connect_cfm)
                hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
        register struct hci_proto *hp;
        int reason = 0x13;      /* default: remote user terminated connection */

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->disconn_ind)
                reason = hp->disconn_ind(conn);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->disconn_ind)
                reason = hp->disconn_ind(conn);

        return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->disconn_cfm)
                hp->disconn_cfm(conn, reason);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->disconn_cfm)
                hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
        register struct hci_proto *hp;
        __u8 encrypt;

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return;

        encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        register struct hci_proto *hp;

        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);

        hp = hci_proto[HCI_PROTO_SCO];
        if (hp && hp->security_cfm)
                hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);

/* ----- HCI callbacks ----- */
struct hci_cb {
        struct list_head list;

        char *name;

        void (*security_cfm)            (struct hci_conn *conn, __u8 status, __u8 encrypt);
        void (*key_change_cfm)          (struct hci_conn *conn, __u8 status);
        void (*role_switch_cfm)         (struct hci_conn *conn, __u8 status, __u8 role);
};

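/* hci_cb entries are registered by upper layers with hci_register_cb()
 * and receive security-related confirmations; the hci_*_cfm helpers
 * below walk the list under hci_cb_list_lock. */
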
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
        struct list_head *p;
        __u8 encrypt;

        hci_proto_auth_cfm(conn, status);

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return;

        encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        struct list_head *p;

        if (conn->sec_level == BT_SECURITY_SDP)
                conn->sec_level = BT_SECURITY_LOW;

        hci_proto_encrypt_cfm(conn, status, encrypt);

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
        struct list_head *p;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->key_change_cfm)
                        cb->key_change_cfm(conn, status);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
        struct list_head *p;

        read_lock_bh(&hci_cb_list_lock);
        list_for_each(p, &hci_cb_list) {
                struct hci_cb *cb = list_entry(p, struct hci_cb, list);
                if (cb->role_switch_cfm)
                        cb->role_switch_cfm(conn, status, role);
        }
        read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;
        struct hci_dev    *hdev;
        struct hci_filter filter;
        __u32             cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         down(&d->req_lock)
#define hci_req_unlock(d)       up(&d->req_lock)

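/* Synchronous HCI requests are serialized on hdev->req_lock; the core
 * sleeps on req_wait_q with req_status set to HCI_REQ_PEND and is woken
 * by hci_req_complete(), which records the result and marks the request
 * HCI_REQ_DONE (or HCI_REQ_CANCELED when the request is aborted). */
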
void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */