/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

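/* Initiate an outgoing LE connection with the local device as master.
 * The scan and connection parameters below are the stack's fixed
 * defaults (scan values in 0.625 ms units, connection interval in
 * 1.25 ms units, supervision timeout in 10 ms units).
 */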
static void hci_le_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;

        conn->state = BT_CONNECT;
        conn->out = 1;
        conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;

        memset(&cp, 0, sizeof(cp));

        cp.scan_interval = cpu_to_le16(0x0060);
        cp.scan_window = cpu_to_le16(0x0030);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
        cp.conn_interval_min = cpu_to_le16(0x0028);
        cp.conn_interval_max = cpu_to_le16(0x0038);
        cp.supervision_timeout = cpu_to_le16(0x002a);
        cp.min_ce_len = cpu_to_le16(0x0000);
        cp.max_ce_len = cpu_to_le16(0x0000);

        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

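/* Initiate an outgoing BR/EDR ACL connection. Paging parameters are
 * taken from a recent inquiry cache entry when one exists, and a role
 * switch is offered unless the adapter is configured to stay master.
 */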
void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                                        cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                conn->ssp_mode = ie->data.ssp_mode;
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_connect_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("%p", conn);

        if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

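/* Request disconnection of an established link; the connection object
 * itself is cleaned up once the Disconnection Complete event arrives.
 */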
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

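/* Set up a synchronous (eSCO) link on top of an existing ACL
 * connection. 0x1f40 requests 8000 bytes/s (64 kbit/s) in each
 * direction; max_latency and retrans_effort are left as "don't care".
 */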
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = cpu_to_le32(0x00001f40);
        cp.max_latency = cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

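/* Ask the controller to update the LE connection parameters: min/max
 * are in 1.25 ms units, latency in connection events and to_multiplier
 * in 10 ms units.
 */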
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                                        u16 latency, u16 to_multiplier)
{
        struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.conn_interval_min = cpu_to_le16(min);
        cp.conn_interval_max = cpu_to_le16(max);
        cp.conn_latency = cpu_to_le16(latency);
        cp.supervision_timeout = cpu_to_le16(to_multiplier);
        cp.min_ce_len = cpu_to_le16(0x0001);
        cp.max_ce_len = cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

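/* Start LE link-layer encryption as master, using the peer's Long Term
 * Key together with the EDIV and Rand values from pairing.
 */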
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                                                        __u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));
        cp.ediv = ediv;
        memcpy(cp.rand, rand, sizeof(cp.rand));

        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);

void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_ltk_reply cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        /* ltk decays to a pointer here, so copy the destination size
         * rather than sizeof(ltk). */
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));

        hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_ltk_reply);

void hci_le_ltk_neg_reply(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_ltk_neg_reply cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);

        hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        BT_DBG("%p", conn);

        if (!sco)
                return;

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

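/* Deferred disconnect work, scheduled once the last reference to the
 * connection has been dropped: cancel a still-pending outgoing connect
 * attempt, or disconnect an established link with the reason supplied
 * by the upper protocol.
 */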
static void hci_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                                        disc_work.work);
        struct hci_dev *hdev = conn->hdev;
        __u8 reason;

        BT_DBG("conn %p state %d", conn, conn->state);

        if (atomic_read(&conn->refcnt))
                return;

        hci_dev_lock(hdev);

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_connect_cancel(conn);
                        else if (conn->type == LE_LINK)
                                hci_le_connect_cancel(conn);
                }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                reason = hci_proto_disconn_ind(conn);
                hci_acl_disconn(conn, reason);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }

        hci_dev_unlock(hdev);
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = cpu_to_le16(4);
                cp.timeout = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                                                                &conn->dst);
}

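/* Allocate and initialise a connection object for the given link type,
 * set up its timers and deferred work, and register it in the device's
 * connection hash and in sysfs.
 */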
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;
        conn->key_type = 0xff;

        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                        (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        INIT_LIST_HEAD(&conn->chan_list);

        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
        setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
                                                        (unsigned long) conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        atomic_set(&conn->devref, 0);

        hci_conn_init_sysfs(conn);

        return conn;
}

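/* Tear down a connection: stop its timers, detach any linked SCO/ACL
 * peer, return unacknowledged packet credits to the controller quota
 * and remove the object from the connection hash.
 */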
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        cancel_delayed_work_sync(&conn->disc_work);

        del_timer(&conn->auto_accept_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        }

        hci_chan_list_flush(conn);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        skb_queue_purge(&conn->data_q);

        hci_conn_put_device(conn);

        hci_dev_put(hdev);

        if (conn->handle == 0)
                kfree(conn);

        return 0;
}

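/* Pick the local adapter to use for a connection to dst: with a
 * specific source address, choose the adapter whose bdaddr matches
 * src; otherwise take the first adapter that is up and whose own
 * address is not dst. The returned device has its reference held.
 */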
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address    - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;
        struct hci_conn *le;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (type == LE_LINK) {
                struct adv_entry *entry;

                le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
                if (le)
                        return ERR_PTR(-EBUSY);

                entry = hci_find_adv_entry(hdev, dst);
                if (!entry)
                        return ERR_PTR(-EHOSTUNREACH);

                le = hci_conn_add(hdev, LE_LINK, dst);
                if (!le)
                        return ERR_PTR(-ENOMEM);

                le->dst_type = entry->bdaddr_type;

                hci_le_connect(le);

                hci_conn_hold(le);

                return le;
        }

        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
                if (!acl)
                        return NULL;
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
        }

        if (type == ACL_LINK)
                return acl;

        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
                sco = hci_conn_add(hdev, type, dst);
                if (!sco) {
                        hci_conn_put(acl);
                        return NULL;
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                acl->power_save = 1;
                hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
                                        !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;

        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        /* Make sure we preserve an existing MITM requirement */
        auth_type |= (conn->auth_type & 0x01);

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;

                /* encrypt must be pending if auth is also pending */
                set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                                                        sizeof(cp), &cp);
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
        }

        return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 0x01;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                                                                        &cp);
        }
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        /* For sdp we don't need the link key. */
        if (sec_level == BT_SECURITY_SDP)
                return 1;

        /* For non 2.1 devices and low security level we don't need the link
           key. */
        if (sec_level == BT_SECURITY_LOW &&
                                (!conn->ssp_mode || !conn->hdev->ssp_mode))
                return 1;

        /* For other security levels we need the link key. */
        if (!(conn->link_mode & HCI_LM_AUTH))
                goto auth;

        /* An authenticated combination key has sufficient security for any
           security level. */
        if (conn->key_type == HCI_LK_AUTH_COMBINATION)
                goto encrypt;

        /* An unauthenticated combination key has sufficient security for
           security level 1 and 2. */
        if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
                        (sec_level == BT_SECURITY_MEDIUM ||
                        sec_level == BT_SECURITY_LOW))
                goto encrypt;

        /* A combination key has always sufficient security for the security
           levels 1 or 2. High security level requires the combination key
           is generated using maximum PIN code length (16).
           For pre 2.1 units. */
        if (conn->key_type == HCI_LK_COMBINATION &&
                        (sec_level != BT_SECURITY_HIGH ||
                        conn->pin_length == 16))
                goto encrypt;

auth:
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;

        if (!hci_conn_auth(conn, sec_level, auth_type))
                return 0;

encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        hci_conn_encrypt(conn);
        return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
        BT_DBG("conn %p", conn);

        if (sec_level != BT_SECURITY_HIGH)
                return 1; /* Accept if non-secure is required */

        if (conn->sec_level == BT_SECURITY_HIGH)
                return 1;

        return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                                                        sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF)
                goto timer;

        if (!conn->power_save && !force_active)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                        jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_DBG("hdev %s", hdev->name);

        list_for_each_entry_rcu(c, &h->list, list) {
                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_connect(conn);

        hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
        atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->devref))
                hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

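/* Back end of the HCIGETCONNLIST ioctl: copy a bounded snapshot of a
 * device's connection list to user space.
 */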
int hci_get_conn_list(void __user *arg)
{
        register struct hci_conn *c;
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        cl = kmalloc(size, GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        hdev = hci_dev_get(req.dev_id);
        if (!hdev) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock(hdev);
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

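/* Allocate a per-connection channel context with its own data queue
 * and add it to the connection's RCU-protected channel list.
 */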
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_chan *chan;

        BT_DBG("%s conn %p", hdev->name, conn);

        chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
        if (!chan)
                return NULL;

        chan->conn = conn;
        skb_queue_head_init(&chan->data_q);

        list_add_rcu(&chan->list, &conn->chan_list);

        return chan;
}

int hci_chan_del(struct hci_chan *chan)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);

        list_del_rcu(&chan->list);

        synchronize_rcu();

        skb_queue_purge(&chan->data_q);
        kfree(chan);

        return 0;
}

void hci_chan_list_flush(struct hci_conn *conn)
{
        struct hci_chan *chan;

        BT_DBG("conn %p", conn);

        list_for_each_entry_rcu(chan, &conn->chan_list, list)
                hci_chan_del(chan);
}