ocfs2: fix several issues of append dio
[linux/fpc-iii.git] / net / bluetooth / hci_conn.c
blobb4548c739a6475446d643bd5b01ab8627ef1f08e
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI connection handling. */
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
38 struct sco_param {
39 u16 pkt_type;
40 u16 max_latency;
41 u8 retrans_effort;
44 static const struct sco_param esco_param_cvsd[] = {
45 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
46 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
47 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
48 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
49 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
52 static const struct sco_param sco_param_cvsd[] = {
53 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
54 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
57 static const struct sco_param esco_param_msbc[] = {
58 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
59 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
62 static void hci_le_create_connection_cancel(struct hci_conn *conn)
64 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
67 /* This function requires the caller holds hdev->lock */
68 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
70 struct hci_conn_params *params;
71 struct smp_irk *irk;
72 bdaddr_t *bdaddr;
73 u8 bdaddr_type;
75 bdaddr = &conn->dst;
76 bdaddr_type = conn->dst_type;
78 /* Check if we need to convert to identity address */
79 irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
80 if (irk) {
81 bdaddr = &irk->bdaddr;
82 bdaddr_type = irk->addr_type;
85 params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
86 if (!params)
87 return;
89 /* The connection attempt was doing scan for new RPA, and is
90 * in scan phase. If params are not associated with any other
91 * autoconnect action, remove them completely. If they are, just unmark
92 * them as waiting for connection, by clearing explicit_connect field.
94 if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
95 hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
96 else
97 params->explicit_connect = false;
100 /* This function requires the caller holds hdev->lock */
101 static void hci_connect_le_scan_remove(struct hci_conn *conn)
103 hci_connect_le_scan_cleanup(conn);
105 hci_conn_hash_del(conn->hdev, conn);
106 hci_update_background_scan(conn->hdev);
109 static void hci_acl_create_connection(struct hci_conn *conn)
111 struct hci_dev *hdev = conn->hdev;
112 struct inquiry_entry *ie;
113 struct hci_cp_create_conn cp;
115 BT_DBG("hcon %p", conn);
117 conn->state = BT_CONNECT;
118 conn->out = true;
119 conn->role = HCI_ROLE_MASTER;
121 conn->attempt++;
123 conn->link_policy = hdev->link_policy;
125 memset(&cp, 0, sizeof(cp));
126 bacpy(&cp.bdaddr, &conn->dst);
127 cp.pscan_rep_mode = 0x02;
129 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
130 if (ie) {
131 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
132 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
133 cp.pscan_mode = ie->data.pscan_mode;
134 cp.clock_offset = ie->data.clock_offset |
135 cpu_to_le16(0x8000);
138 memcpy(conn->dev_class, ie->data.dev_class, 3);
139 if (ie->data.ssp_mode > 0)
140 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
143 cp.pkt_type = cpu_to_le16(conn->pkt_type);
144 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
145 cp.role_switch = 0x01;
146 else
147 cp.role_switch = 0x00;
149 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
152 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
154 struct hci_cp_create_conn_cancel cp;
156 BT_DBG("hcon %p", conn);
158 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
159 return;
161 bacpy(&cp.bdaddr, &conn->dst);
162 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
165 static void hci_reject_sco(struct hci_conn *conn)
167 struct hci_cp_reject_sync_conn_req cp;
169 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
170 bacpy(&cp.bdaddr, &conn->dst);
172 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
175 int hci_disconnect(struct hci_conn *conn, __u8 reason)
177 struct hci_cp_disconnect cp;
179 BT_DBG("hcon %p", conn);
181 /* When we are master of an established connection and it enters
182 * the disconnect timeout, then go ahead and try to read the
183 * current clock offset. Processing of the result is done
184 * within the event handling and hci_clock_offset_evt function.
186 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
187 struct hci_dev *hdev = conn->hdev;
188 struct hci_cp_read_clock_offset clkoff_cp;
190 clkoff_cp.handle = cpu_to_le16(conn->handle);
191 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
192 &clkoff_cp);
195 conn->state = BT_DISCONN;
197 cp.handle = cpu_to_le16(conn->handle);
198 cp.reason = reason;
199 return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
202 static void hci_amp_disconn(struct hci_conn *conn)
204 struct hci_cp_disconn_phy_link cp;
206 BT_DBG("hcon %p", conn);
208 conn->state = BT_DISCONN;
210 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
211 cp.reason = hci_proto_disconn_ind(conn);
212 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
213 sizeof(cp), &cp);
216 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
218 struct hci_dev *hdev = conn->hdev;
219 struct hci_cp_add_sco cp;
221 BT_DBG("hcon %p", conn);
223 conn->state = BT_CONNECT;
224 conn->out = true;
226 conn->attempt++;
228 cp.handle = cpu_to_le16(handle);
229 cp.pkt_type = cpu_to_le16(conn->pkt_type);
231 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
234 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
236 struct hci_dev *hdev = conn->hdev;
237 struct hci_cp_setup_sync_conn cp;
238 const struct sco_param *param;
240 BT_DBG("hcon %p", conn);
242 conn->state = BT_CONNECT;
243 conn->out = true;
245 conn->attempt++;
247 cp.handle = cpu_to_le16(handle);
249 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
250 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
251 cp.voice_setting = cpu_to_le16(conn->setting);
253 switch (conn->setting & SCO_AIRMODE_MASK) {
254 case SCO_AIRMODE_TRANSP:
255 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
256 return false;
257 param = &esco_param_msbc[conn->attempt - 1];
258 break;
259 case SCO_AIRMODE_CVSD:
260 if (lmp_esco_capable(conn->link)) {
261 if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
262 return false;
263 param = &esco_param_cvsd[conn->attempt - 1];
264 } else {
265 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
266 return false;
267 param = &sco_param_cvsd[conn->attempt - 1];
269 break;
270 default:
271 return false;
274 cp.retrans_effort = param->retrans_effort;
275 cp.pkt_type = __cpu_to_le16(param->pkt_type);
276 cp.max_latency = __cpu_to_le16(param->max_latency);
278 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
279 return false;
281 return true;
284 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
285 u16 to_multiplier)
287 struct hci_dev *hdev = conn->hdev;
288 struct hci_conn_params *params;
289 struct hci_cp_le_conn_update cp;
291 hci_dev_lock(hdev);
293 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
294 if (params) {
295 params->conn_min_interval = min;
296 params->conn_max_interval = max;
297 params->conn_latency = latency;
298 params->supervision_timeout = to_multiplier;
301 hci_dev_unlock(hdev);
303 memset(&cp, 0, sizeof(cp));
304 cp.handle = cpu_to_le16(conn->handle);
305 cp.conn_interval_min = cpu_to_le16(min);
306 cp.conn_interval_max = cpu_to_le16(max);
307 cp.conn_latency = cpu_to_le16(latency);
308 cp.supervision_timeout = cpu_to_le16(to_multiplier);
309 cp.min_ce_len = cpu_to_le16(0x0000);
310 cp.max_ce_len = cpu_to_le16(0x0000);
312 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
314 if (params)
315 return 0x01;
317 return 0x00;
320 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
321 __u8 ltk[16], __u8 key_size)
323 struct hci_dev *hdev = conn->hdev;
324 struct hci_cp_le_start_enc cp;
326 BT_DBG("hcon %p", conn);
328 memset(&cp, 0, sizeof(cp));
330 cp.handle = cpu_to_le16(conn->handle);
331 cp.rand = rand;
332 cp.ediv = ediv;
333 memcpy(cp.ltk, ltk, key_size);
335 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
338 /* Device _must_ be locked */
339 void hci_sco_setup(struct hci_conn *conn, __u8 status)
341 struct hci_conn *sco = conn->link;
343 if (!sco)
344 return;
346 BT_DBG("hcon %p", conn);
348 if (!status) {
349 if (lmp_esco_capable(conn->hdev))
350 hci_setup_sync(sco, conn->handle);
351 else
352 hci_add_sco(sco, conn->handle);
353 } else {
354 hci_connect_cfm(sco, status);
355 hci_conn_del(sco);
359 static void hci_conn_timeout(struct work_struct *work)
361 struct hci_conn *conn = container_of(work, struct hci_conn,
362 disc_work.work);
363 int refcnt = atomic_read(&conn->refcnt);
365 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
367 WARN_ON(refcnt < 0);
369 /* FIXME: It was observed that in pairing failed scenario, refcnt
370 * drops below 0. Probably this is because l2cap_conn_del calls
371 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
372 * dropped. After that loop hci_chan_del is called which also drops
373 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
374 * otherwise drop it.
376 if (refcnt > 0)
377 return;
379 switch (conn->state) {
380 case BT_CONNECT:
381 case BT_CONNECT2:
382 if (conn->out) {
383 if (conn->type == ACL_LINK)
384 hci_acl_create_connection_cancel(conn);
385 else if (conn->type == LE_LINK) {
386 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
387 hci_connect_le_scan_remove(conn);
388 else
389 hci_le_create_connection_cancel(conn);
391 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
392 hci_reject_sco(conn);
394 break;
395 case BT_CONFIG:
396 case BT_CONNECTED:
397 if (conn->type == AMP_LINK) {
398 hci_amp_disconn(conn);
399 } else {
400 __u8 reason = hci_proto_disconn_ind(conn);
401 hci_disconnect(conn, reason);
403 break;
404 default:
405 conn->state = BT_CLOSED;
406 break;
410 /* Enter sniff mode */
411 static void hci_conn_idle(struct work_struct *work)
413 struct hci_conn *conn = container_of(work, struct hci_conn,
414 idle_work.work);
415 struct hci_dev *hdev = conn->hdev;
417 BT_DBG("hcon %p mode %d", conn, conn->mode);
419 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
420 return;
422 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
423 return;
425 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
426 struct hci_cp_sniff_subrate cp;
427 cp.handle = cpu_to_le16(conn->handle);
428 cp.max_latency = cpu_to_le16(0);
429 cp.min_remote_timeout = cpu_to_le16(0);
430 cp.min_local_timeout = cpu_to_le16(0);
431 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
434 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
435 struct hci_cp_sniff_mode cp;
436 cp.handle = cpu_to_le16(conn->handle);
437 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
438 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
439 cp.attempt = cpu_to_le16(4);
440 cp.timeout = cpu_to_le16(1);
441 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
445 static void hci_conn_auto_accept(struct work_struct *work)
447 struct hci_conn *conn = container_of(work, struct hci_conn,
448 auto_accept_work.work);
450 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
451 &conn->dst);
454 static void le_conn_timeout(struct work_struct *work)
456 struct hci_conn *conn = container_of(work, struct hci_conn,
457 le_conn_timeout.work);
458 struct hci_dev *hdev = conn->hdev;
460 BT_DBG("");
462 /* We could end up here due to having done directed advertising,
463 * so clean up the state if necessary. This should however only
464 * happen with broken hardware or if low duty cycle was used
465 * (which doesn't have a timeout of its own).
467 if (conn->role == HCI_ROLE_SLAVE) {
468 u8 enable = 0x00;
469 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
470 &enable);
471 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
472 return;
475 hci_le_create_connection_cancel(conn);
478 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
479 u8 role)
481 struct hci_conn *conn;
483 BT_DBG("%s dst %pMR", hdev->name, dst);
485 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
486 if (!conn)
487 return NULL;
489 bacpy(&conn->dst, dst);
490 bacpy(&conn->src, &hdev->bdaddr);
491 conn->hdev = hdev;
492 conn->type = type;
493 conn->role = role;
494 conn->mode = HCI_CM_ACTIVE;
495 conn->state = BT_OPEN;
496 conn->auth_type = HCI_AT_GENERAL_BONDING;
497 conn->io_capability = hdev->io_capability;
498 conn->remote_auth = 0xff;
499 conn->key_type = 0xff;
500 conn->rssi = HCI_RSSI_INVALID;
501 conn->tx_power = HCI_TX_POWER_INVALID;
502 conn->max_tx_power = HCI_TX_POWER_INVALID;
504 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
505 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
507 if (conn->role == HCI_ROLE_MASTER)
508 conn->out = true;
510 switch (type) {
511 case ACL_LINK:
512 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
513 break;
514 case LE_LINK:
515 /* conn->src should reflect the local identity address */
516 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
517 break;
518 case SCO_LINK:
519 if (lmp_esco_capable(hdev))
520 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
521 (hdev->esco_type & EDR_ESCO_MASK);
522 else
523 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
524 break;
525 case ESCO_LINK:
526 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
527 break;
530 skb_queue_head_init(&conn->data_q);
532 INIT_LIST_HEAD(&conn->chan_list);
534 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
535 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
536 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
537 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
539 atomic_set(&conn->refcnt, 0);
541 hci_dev_hold(hdev);
543 hci_conn_hash_add(hdev, conn);
544 if (hdev->notify)
545 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
547 hci_conn_init_sysfs(conn);
549 return conn;
552 int hci_conn_del(struct hci_conn *conn)
554 struct hci_dev *hdev = conn->hdev;
556 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
558 cancel_delayed_work_sync(&conn->disc_work);
559 cancel_delayed_work_sync(&conn->auto_accept_work);
560 cancel_delayed_work_sync(&conn->idle_work);
562 if (conn->type == ACL_LINK) {
563 struct hci_conn *sco = conn->link;
564 if (sco)
565 sco->link = NULL;
567 /* Unacked frames */
568 hdev->acl_cnt += conn->sent;
569 } else if (conn->type == LE_LINK) {
570 cancel_delayed_work(&conn->le_conn_timeout);
572 if (hdev->le_pkts)
573 hdev->le_cnt += conn->sent;
574 else
575 hdev->acl_cnt += conn->sent;
576 } else {
577 struct hci_conn *acl = conn->link;
578 if (acl) {
579 acl->link = NULL;
580 hci_conn_drop(acl);
584 hci_chan_list_flush(conn);
586 if (conn->amp_mgr)
587 amp_mgr_put(conn->amp_mgr);
589 hci_conn_hash_del(hdev, conn);
590 if (hdev->notify)
591 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
593 skb_queue_purge(&conn->data_q);
595 hci_conn_del_sysfs(conn);
597 debugfs_remove_recursive(conn->debugfs);
599 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
600 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
602 hci_dev_put(hdev);
604 hci_conn_put(conn);
606 return 0;
609 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
611 int use_src = bacmp(src, BDADDR_ANY);
612 struct hci_dev *hdev = NULL, *d;
614 BT_DBG("%pMR -> %pMR", src, dst);
616 read_lock(&hci_dev_list_lock);
618 list_for_each_entry(d, &hci_dev_list, list) {
619 if (!test_bit(HCI_UP, &d->flags) ||
620 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
621 d->dev_type != HCI_BREDR)
622 continue;
624 /* Simple routing:
625 * No source address - find interface with bdaddr != dst
626 * Source address - find interface with bdaddr == src
629 if (use_src) {
630 if (!bacmp(&d->bdaddr, src)) {
631 hdev = d; break;
633 } else {
634 if (bacmp(&d->bdaddr, dst)) {
635 hdev = d; break;
640 if (hdev)
641 hdev = hci_dev_hold(hdev);
643 read_unlock(&hci_dev_list_lock);
644 return hdev;
646 EXPORT_SYMBOL(hci_get_route);
648 /* This function requires the caller holds hdev->lock */
649 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
651 struct hci_dev *hdev = conn->hdev;
652 struct hci_conn_params *params;
654 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
655 conn->dst_type);
656 if (params && params->conn) {
657 hci_conn_drop(params->conn);
658 hci_conn_put(params->conn);
659 params->conn = NULL;
662 conn->state = BT_CLOSED;
664 mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
665 status);
667 hci_connect_cfm(conn, status);
669 hci_conn_del(conn);
671 /* Since we may have temporarily stopped the background scanning in
672 * favor of connection establishment, we should restart it.
674 hci_update_background_scan(hdev);
676 /* Re-enable advertising in case this was a failed connection
677 * attempt as a peripheral.
679 mgmt_reenable_advertising(hdev);
682 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
684 struct hci_conn *conn;
686 hci_dev_lock(hdev);
688 conn = hci_lookup_le_connect(hdev);
690 if (!status) {
691 hci_connect_le_scan_cleanup(conn);
692 goto done;
695 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
696 status);
698 if (!conn)
699 goto done;
701 hci_le_conn_failed(conn, status);
703 done:
704 hci_dev_unlock(hdev);
707 static void hci_req_add_le_create_conn(struct hci_request *req,
708 struct hci_conn *conn)
710 struct hci_cp_le_create_conn cp;
711 struct hci_dev *hdev = conn->hdev;
712 u8 own_addr_type;
714 memset(&cp, 0, sizeof(cp));
716 /* Update random address, but set require_privacy to false so
717 * that we never connect with an non-resolvable address.
719 if (hci_update_random_address(req, false, &own_addr_type))
720 return;
722 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
723 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
724 bacpy(&cp.peer_addr, &conn->dst);
725 cp.peer_addr_type = conn->dst_type;
726 cp.own_address_type = own_addr_type;
727 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
728 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
729 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
730 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
731 cp.min_ce_len = cpu_to_le16(0x0000);
732 cp.max_ce_len = cpu_to_le16(0x0000);
734 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
736 conn->state = BT_CONNECT;
737 clear_bit(HCI_CONN_SCANNING, &conn->flags);
740 static void hci_req_directed_advertising(struct hci_request *req,
741 struct hci_conn *conn)
743 struct hci_dev *hdev = req->hdev;
744 struct hci_cp_le_set_adv_param cp;
745 u8 own_addr_type;
746 u8 enable;
748 /* Clear the HCI_LE_ADV bit temporarily so that the
749 * hci_update_random_address knows that it's safe to go ahead
750 * and write a new random address. The flag will be set back on
751 * as soon as the SET_ADV_ENABLE HCI command completes.
753 hci_dev_clear_flag(hdev, HCI_LE_ADV);
755 /* Set require_privacy to false so that the remote device has a
756 * chance of identifying us.
758 if (hci_update_random_address(req, false, &own_addr_type) < 0)
759 return;
761 memset(&cp, 0, sizeof(cp));
762 cp.type = LE_ADV_DIRECT_IND;
763 cp.own_address_type = own_addr_type;
764 cp.direct_addr_type = conn->dst_type;
765 bacpy(&cp.direct_addr, &conn->dst);
766 cp.channel_map = hdev->le_adv_channel_map;
768 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
770 enable = 0x01;
771 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
773 conn->state = BT_CONNECT;
776 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
777 u8 dst_type, u8 sec_level, u16 conn_timeout,
778 u8 role)
780 struct hci_conn_params *params;
781 struct hci_conn *conn, *conn_unfinished;
782 struct smp_irk *irk;
783 struct hci_request req;
784 int err;
786 /* Let's make sure that le is enabled.*/
787 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
788 if (lmp_le_capable(hdev))
789 return ERR_PTR(-ECONNREFUSED);
791 return ERR_PTR(-EOPNOTSUPP);
794 /* Some devices send ATT messages as soon as the physical link is
795 * established. To be able to handle these ATT messages, the user-
796 * space first establishes the connection and then starts the pairing
797 * process.
799 * So if a hci_conn object already exists for the following connection
800 * attempt, we simply update pending_sec_level and auth_type fields
801 * and return the object found.
803 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
804 conn_unfinished = NULL;
805 if (conn) {
806 if (conn->state == BT_CONNECT &&
807 test_bit(HCI_CONN_SCANNING, &conn->flags)) {
808 BT_DBG("will continue unfinished conn %pMR", dst);
809 conn_unfinished = conn;
810 } else {
811 if (conn->pending_sec_level < sec_level)
812 conn->pending_sec_level = sec_level;
813 goto done;
817 /* Since the controller supports only one LE connection attempt at a
818 * time, we return -EBUSY if there is any connection attempt running.
820 if (hci_lookup_le_connect(hdev))
821 return ERR_PTR(-EBUSY);
823 /* When given an identity address with existing identity
824 * resolving key, the connection needs to be established
825 * to a resolvable random address.
827 * Storing the resolvable random address is required here
828 * to handle connection failures. The address will later
829 * be resolved back into the original identity address
830 * from the connect request.
832 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
833 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
834 dst = &irk->rpa;
835 dst_type = ADDR_LE_DEV_RANDOM;
838 if (conn_unfinished) {
839 conn = conn_unfinished;
840 bacpy(&conn->dst, dst);
841 } else {
842 conn = hci_conn_add(hdev, LE_LINK, dst, role);
845 if (!conn)
846 return ERR_PTR(-ENOMEM);
848 conn->dst_type = dst_type;
849 conn->sec_level = BT_SECURITY_LOW;
850 conn->conn_timeout = conn_timeout;
852 if (!conn_unfinished)
853 conn->pending_sec_level = sec_level;
855 hci_req_init(&req, hdev);
857 /* Disable advertising if we're active. For master role
858 * connections most controllers will refuse to connect if
859 * advertising is enabled, and for slave role connections we
860 * anyway have to disable it in order to start directed
861 * advertising.
863 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
864 u8 enable = 0x00;
865 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
866 &enable);
869 /* If requested to connect as slave use directed advertising */
870 if (conn->role == HCI_ROLE_SLAVE) {
871 /* If we're active scanning most controllers are unable
872 * to initiate advertising. Simply reject the attempt.
874 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
875 hdev->le_scan_type == LE_SCAN_ACTIVE) {
876 skb_queue_purge(&req.cmd_q);
877 hci_conn_del(conn);
878 return ERR_PTR(-EBUSY);
881 hci_req_directed_advertising(&req, conn);
882 goto create_conn;
885 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
886 if (params) {
887 conn->le_conn_min_interval = params->conn_min_interval;
888 conn->le_conn_max_interval = params->conn_max_interval;
889 conn->le_conn_latency = params->conn_latency;
890 conn->le_supv_timeout = params->supervision_timeout;
891 } else {
892 conn->le_conn_min_interval = hdev->le_conn_min_interval;
893 conn->le_conn_max_interval = hdev->le_conn_max_interval;
894 conn->le_conn_latency = hdev->le_conn_latency;
895 conn->le_supv_timeout = hdev->le_supv_timeout;
898 /* If controller is scanning, we stop it since some controllers are
899 * not able to scan and connect at the same time. Also set the
900 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
901 * handler for scan disabling knows to set the correct discovery
902 * state.
904 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
905 hci_req_add_le_scan_disable(&req);
906 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
909 hci_req_add_le_create_conn(&req, conn);
911 create_conn:
912 err = hci_req_run(&req, create_le_conn_complete);
913 if (err) {
914 hci_conn_del(conn);
915 return ERR_PTR(err);
918 done:
919 /* If this is continuation of connect started by hci_connect_le_scan,
920 * it already called hci_conn_hold and calling it again would mess the
921 * counter.
923 if (!conn_unfinished)
924 hci_conn_hold(conn);
926 return conn;
929 static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
930 u16 opcode)
932 struct hci_conn *conn;
934 if (!status)
935 return;
937 BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
938 status);
940 hci_dev_lock(hdev);
942 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
943 if (conn)
944 hci_le_conn_failed(conn, status);
946 hci_dev_unlock(hdev);
949 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
951 struct hci_conn *conn;
953 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
954 if (!conn)
955 return false;
957 if (conn->dst_type != type)
958 return false;
960 if (conn->state != BT_CONNECTED)
961 return false;
963 return true;
966 /* This function requires the caller holds hdev->lock */
967 static int hci_explicit_conn_params_set(struct hci_request *req,
968 bdaddr_t *addr, u8 addr_type)
970 struct hci_dev *hdev = req->hdev;
971 struct hci_conn_params *params;
973 if (is_connected(hdev, addr, addr_type))
974 return -EISCONN;
976 params = hci_conn_params_add(hdev, addr, addr_type);
977 if (!params)
978 return -EIO;
980 /* If we created new params, or existing params were marked as disabled,
981 * mark them to be used just once to connect.
983 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
984 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
985 list_del_init(&params->action);
986 list_add(&params->action, &hdev->pend_le_conns);
989 params->explicit_connect = true;
990 __hci_update_background_scan(req);
992 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
993 params->auto_connect);
995 return 0;
998 /* This function requires the caller holds hdev->lock */
999 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1000 u8 dst_type, u8 sec_level,
1001 u16 conn_timeout, u8 role)
1003 struct hci_conn *conn;
1004 struct hci_request req;
1005 int err;
1007 /* Let's make sure that le is enabled.*/
1008 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1009 if (lmp_le_capable(hdev))
1010 return ERR_PTR(-ECONNREFUSED);
1012 return ERR_PTR(-EOPNOTSUPP);
1015 /* Some devices send ATT messages as soon as the physical link is
1016 * established. To be able to handle these ATT messages, the user-
1017 * space first establishes the connection and then starts the pairing
1018 * process.
1020 * So if a hci_conn object already exists for the following connection
1021 * attempt, we simply update pending_sec_level and auth_type fields
1022 * and return the object found.
1024 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
1025 if (conn) {
1026 if (conn->pending_sec_level < sec_level)
1027 conn->pending_sec_level = sec_level;
1028 goto done;
1031 BT_DBG("requesting refresh of dst_addr");
1033 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1034 if (!conn)
1035 return ERR_PTR(-ENOMEM);
1037 hci_req_init(&req, hdev);
1039 if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
1040 return ERR_PTR(-EBUSY);
1042 conn->state = BT_CONNECT;
1043 set_bit(HCI_CONN_SCANNING, &conn->flags);
1045 err = hci_req_run(&req, hci_connect_le_scan_complete);
1046 if (err && err != -ENODATA) {
1047 hci_conn_del(conn);
1048 return ERR_PTR(err);
1051 conn->dst_type = dst_type;
1052 conn->sec_level = BT_SECURITY_LOW;
1053 conn->pending_sec_level = sec_level;
1054 conn->conn_timeout = conn_timeout;
1056 done:
1057 hci_conn_hold(conn);
1058 return conn;
1061 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1062 u8 sec_level, u8 auth_type)
1064 struct hci_conn *acl;
1066 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1067 if (lmp_bredr_capable(hdev))
1068 return ERR_PTR(-ECONNREFUSED);
1070 return ERR_PTR(-EOPNOTSUPP);
1073 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1074 if (!acl) {
1075 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1076 if (!acl)
1077 return ERR_PTR(-ENOMEM);
1080 hci_conn_hold(acl);
1082 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1083 acl->sec_level = BT_SECURITY_LOW;
1084 acl->pending_sec_level = sec_level;
1085 acl->auth_type = auth_type;
1086 hci_acl_create_connection(acl);
1089 return acl;
1092 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1093 __u16 setting)
1095 struct hci_conn *acl;
1096 struct hci_conn *sco;
1098 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
1099 if (IS_ERR(acl))
1100 return acl;
1102 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1103 if (!sco) {
1104 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1105 if (!sco) {
1106 hci_conn_drop(acl);
1107 return ERR_PTR(-ENOMEM);
1111 acl->link = sco;
1112 sco->link = acl;
1114 hci_conn_hold(sco);
1116 sco->setting = setting;
1118 if (acl->state == BT_CONNECTED &&
1119 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1120 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1121 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1123 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1124 /* defer SCO setup until mode change completed */
1125 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1126 return sco;
1129 hci_sco_setup(acl, 0x00);
1132 return sco;
1135 /* Check link security requirement */
1136 int hci_conn_check_link_mode(struct hci_conn *conn)
1138 BT_DBG("hcon %p", conn);
1140 /* In Secure Connections Only mode, it is required that Secure
1141 * Connections is used and the link is encrypted with AES-CCM
1142 * using a P-256 authenticated combination key.
1144 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1145 if (!hci_conn_sc_enabled(conn) ||
1146 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1147 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1148 return 0;
1151 if (hci_conn_ssp_enabled(conn) &&
1152 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1153 return 0;
1155 return 1;
/*
 * Request authentication of the remote device at @sec_level.
 *
 * Returns 1 if the link is already authenticated at a sufficient level,
 * 0 if an HCI Authentication Requested command was issued (or one is
 * already pending) and the caller must wait for completion.
 */
1158 /* Authenticate remote device */
1159 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1161 BT_DBG("hcon %p", conn);
/* A previously requested (pending) security level may be stricter. */
1163 if (conn->pending_sec_level > sec_level)
1164 sec_level = conn->pending_sec_level;
1166 if (sec_level > conn->sec_level)
1167 conn->pending_sec_level = sec_level;
1168 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1169 return 1;
1171 /* Make sure we preserve an existing MITM requirement */
/* Bit 0 of auth_type carries the MITM-protection requirement. */
1172 auth_type |= (conn->auth_type & 0x01);
1174 conn->auth_type = auth_type;
/* Only one authentication request may be outstanding at a time. */
1176 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1177 struct hci_cp_auth_requested cp;
1179 cp.handle = cpu_to_le16(conn->handle);
1180 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1181 sizeof(cp), &cp);
1183 /* If we're already encrypted set the REAUTH_PEND flag,
1184 * otherwise set the ENCRYPT_PEND.
1186 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1187 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1188 else
1189 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1192 return 0;
/*
 * Ask the controller to enable encryption on @conn, unless an
 * encryption change is already pending.
 */
1195 /* Encrypt the link */
1196 static void hci_conn_encrypt(struct hci_conn *conn)
1198 BT_DBG("hcon %p", conn);
1200 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1201 struct hci_cp_set_conn_encrypt cp;
1202 cp.handle = cpu_to_le16(conn->handle);
1203 cp.encrypt = 0x01;
1204 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1205 &cp);
/*
 * Raise the security of @conn to @sec_level, authenticating and/or
 * encrypting the link as needed.  @auth_type carries the requested
 * authentication requirements; @initiator marks the local side as the
 * initiator of the security procedure.
 *
 * Returns 1 if the requested level is already satisfied, 0 if a
 * security procedure was started and the caller must wait for it to
 * complete.  LE links are delegated entirely to SMP.
 */
1209 /* Enable security */
1210 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1211 bool initiator)
1213 BT_DBG("hcon %p", conn);
1215 if (conn->type == LE_LINK)
1216 return smp_conn_security(conn, sec_level);
1218 /* For sdp we don't need the link key. */
1219 if (sec_level == BT_SECURITY_SDP)
1220 return 1;
1222 /* For non 2.1 devices and low security level we don't need the link
1223 key. */
1224 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1225 return 1;
1227 /* For other security levels we need the link key. */
1228 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1229 goto auth;
1231 /* An authenticated FIPS approved combination key has sufficient
1232 * security for security level 4. */
1233 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1234 sec_level == BT_SECURITY_FIPS)
1235 goto encrypt;
1237 /* An authenticated combination key has sufficient security for
1238 security level 3. */
1239 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1240 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1241 sec_level == BT_SECURITY_HIGH)
1242 goto encrypt;
1244 /* An unauthenticated combination key has sufficient security for
1245 security level 1 and 2. */
1246 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1247 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1248 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1249 goto encrypt;
1251 /* A combination key has always sufficient security for the security
1252 levels 1 or 2. High security level requires the combination key
1253 is generated using maximum PIN code length (16).
1254 For pre 2.1 units. */
1255 if (conn->key_type == HCI_LK_COMBINATION &&
1256 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1257 conn->pin_length == 16))
1258 goto encrypt;
1260 auth:
/* Don't start authentication while an encryption change is pending. */
1261 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1262 return 0;
1264 if (initiator)
1265 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1267 if (!hci_conn_auth(conn, sec_level, auth_type))
1268 return 0;
1270 encrypt:
1271 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1272 return 1;
1274 hci_conn_encrypt(conn);
1275 return 0;
1277 EXPORT_SYMBOL(hci_conn_security);
/*
 * Check whether @conn is secure enough when the caller demands
 * BT_SECURITY_HIGH or BT_SECURITY_FIPS.  Returns 1 to accept the
 * link, 0 to reject it.
 */
1279 /* Check secure link requirement */
1280 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1282 BT_DBG("hcon %p", conn);
1284 /* Accept if non-secure or higher security level is required */
1285 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1286 return 1;
1288 /* Accept if secure or higher security level is already present */
1289 if (conn->sec_level == BT_SECURITY_HIGH ||
1290 conn->sec_level == BT_SECURITY_FIPS)
1291 return 1;
1293 /* Reject not secure link */
1294 return 0;
1296 EXPORT_SYMBOL(hci_conn_check_secure);
/*
 * Request a role switch on @conn.  Returns 1 if the connection already
 * has the requested role; otherwise issues an HCI Switch Role command
 * (unless one is already pending) and returns 0.
 */
1298 /* Switch role */
1299 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1301 BT_DBG("hcon %p", conn);
1303 if (role == conn->role)
1304 return 1;
1306 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1307 struct hci_cp_switch_role cp;
1308 bacpy(&cp.bdaddr, &conn->dst);
1309 cp.role = role;
1310 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1313 return 0;
1315 EXPORT_SYMBOL(hci_conn_switch_role);
/*
 * Bring @conn out of sniff mode (when power-save policy or
 * @force_active allows it) and (re)arm the idle-timeout work.
 */
1317 /* Enter active mode */
1318 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1320 struct hci_dev *hdev = conn->hdev;
1322 BT_DBG("hcon %p mode %d", conn, conn->mode);
1324 if (conn->mode != HCI_CM_SNIFF)
1325 goto timer;
1327 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1328 goto timer;
/* Only one mode-change request may be outstanding at a time. */
1330 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1331 struct hci_cp_exit_sniff_mode cp;
1332 cp.handle = cpu_to_le16(conn->handle);
1333 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1336 timer:
/* idle_timeout == 0 disables the idle timer entirely. */
1337 if (hdev->idle_timeout > 0)
1338 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1339 msecs_to_jiffies(hdev->idle_timeout));
/*
 * Tear down every connection on @hdev: mark each closed, notify the
 * upper layers via hci_disconn_cfm() and delete the hci_conn.
 */
1342 /* Drop all connection on the device */
1343 void hci_conn_hash_flush(struct hci_dev *hdev)
1345 struct hci_conn_hash *h = &hdev->conn_hash;
1346 struct hci_conn *c, *n;
1348 BT_DBG("hdev %s", hdev->name);
/* _safe iteration: hci_conn_del() unlinks c from the list. */
1350 list_for_each_entry_safe(c, n, &h->list, list) {
1351 c->state = BT_CLOSED;
1353 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1354 hci_conn_del(c);
/*
 * Kick off the next deferred outgoing ACL connection attempt, if any
 * connection is parked in state BT_CONNECT2.
 */
1358 /* Check pending connect attempts */
1359 void hci_conn_check_pending(struct hci_dev *hdev)
1361 struct hci_conn *conn;
1363 BT_DBG("hdev %s", hdev->name);
1365 hci_dev_lock(hdev);
1367 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1368 if (conn)
1369 hci_acl_create_connection(conn);
1371 hci_dev_unlock(hdev);
/*
 * Translate the connection's role and flag bits into the HCI_LM_*
 * bitmask reported through the ioctl interface.
 */
1374 static u32 get_link_mode(struct hci_conn *conn)
1376 u32 link_mode = 0;
1378 if (conn->role == HCI_ROLE_MASTER)
1379 link_mode |= HCI_LM_MASTER;
1381 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1382 link_mode |= HCI_LM_ENCRYPT;
1384 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1385 link_mode |= HCI_LM_AUTH;
1387 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1388 link_mode |= HCI_LM_SECURE;
1390 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1391 link_mode |= HCI_LM_FIPS;
1393 return link_mode;
/*
 * HCIGETCONNLIST ioctl helper: copy up to req.conn_num connection
 * descriptors for the requested device back to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT on copy failure,
 * -EINVAL for a bad count, -ENOMEM, -ENODEV for an unknown device id).
 */
1396 int hci_get_conn_list(void __user *arg)
1398 struct hci_conn *c;
1399 struct hci_conn_list_req req, *cl;
1400 struct hci_conn_info *ci;
1401 struct hci_dev *hdev;
1402 int n = 0, size, err;
1404 if (copy_from_user(&req, arg, sizeof(req)))
1405 return -EFAULT;
/* Bound the user-supplied count so the kmalloc below stays small. */
1407 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1408 return -EINVAL;
1410 size = sizeof(req) + req.conn_num * sizeof(*ci);
1412 cl = kmalloc(size, GFP_KERNEL);
1413 if (!cl)
1414 return -ENOMEM;
1416 hdev = hci_dev_get(req.dev_id);
1417 if (!hdev) {
1418 kfree(cl);
1419 return -ENODEV;
1422 ci = cl->conn_info;
/* Snapshot the connection list under the device lock. */
1424 hci_dev_lock(hdev);
1425 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1426 bacpy(&(ci + n)->bdaddr, &c->dst);
1427 (ci + n)->handle = c->handle;
1428 (ci + n)->type = c->type;
1429 (ci + n)->out = c->out;
1430 (ci + n)->state = c->state;
1431 (ci + n)->link_mode = get_link_mode(c);
1432 if (++n >= req.conn_num)
1433 break;
1435 hci_dev_unlock(hdev);
1437 cl->dev_id = hdev->id;
1438 cl->conn_num = n;
/* Only copy back the entries actually filled in. */
1439 size = sizeof(req) + n * sizeof(*ci);
1441 hci_dev_put(hdev);
1443 err = copy_to_user(arg, cl, size);
1444 kfree(cl);
1446 return err ? -EFAULT : 0;
/*
 * HCIGETCONNINFO ioctl helper: look up one connection by type and
 * bdaddr and copy its info to user space (just past the request
 * struct).  Returns 0 on success, -EFAULT or -ENOENT.
 */
1449 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1451 struct hci_conn_info_req req;
1452 struct hci_conn_info ci;
1453 struct hci_conn *conn;
1454 char __user *ptr = arg + sizeof(req);
1456 if (copy_from_user(&req, arg, sizeof(req)))
1457 return -EFAULT;
/* Fill ci under the device lock; conn may vanish after unlock. */
1459 hci_dev_lock(hdev);
1460 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1461 if (conn) {
1462 bacpy(&ci.bdaddr, &conn->dst);
1463 ci.handle = conn->handle;
1464 ci.type = conn->type;
1465 ci.out = conn->out;
1466 ci.state = conn->state;
1467 ci.link_mode = get_link_mode(conn);
1469 hci_dev_unlock(hdev);
1471 if (!conn)
1472 return -ENOENT;
1474 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
/*
 * HCIGETAUTHINFO ioctl helper: report the auth_type of the ACL link to
 * the given bdaddr back through @arg.  Returns 0, -EFAULT or -ENOENT.
 */
1477 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1479 struct hci_auth_info_req req;
1480 struct hci_conn *conn;
1482 if (copy_from_user(&req, arg, sizeof(req)))
1483 return -EFAULT;
1485 hci_dev_lock(hdev);
1486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1487 if (conn)
1488 req.type = conn->auth_type;
1489 hci_dev_unlock(hdev);
1491 if (!conn)
1492 return -ENOENT;
1494 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
/*
 * Allocate a new hci_chan on @conn and link it (RCU) into the
 * connection's channel list.  Takes a reference on @conn via
 * hci_conn_get(); dropped again in hci_chan_del().  Returns NULL on
 * allocation failure or when the connection is being dropped.
 */
1497 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1499 struct hci_dev *hdev = conn->hdev;
1500 struct hci_chan *chan;
1502 BT_DBG("%s hcon %p", hdev->name, conn);
/* hci_chan_del() sets HCI_CONN_DROP to fence off new channels. */
1504 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1505 BT_DBG("Refusing to create new hci_chan");
1506 return NULL;
1509 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1510 if (!chan)
1511 return NULL;
1513 chan->conn = hci_conn_get(conn);
1514 skb_queue_head_init(&chan->data_q);
1515 chan->state = BT_CONNECTED;
1517 list_add_rcu(&chan->list, &conn->chan_list);
1519 return chan;
/*
 * Unlink @chan from its connection, wait for RCU readers to finish,
 * drop the connection reference taken in hci_chan_create(), then free
 * the channel and any queued data.
 */
1522 void hci_chan_del(struct hci_chan *chan)
1524 struct hci_conn *conn = chan->conn;
1525 struct hci_dev *hdev = conn->hdev;
1527 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1529 list_del_rcu(&chan->list);
/* Ensure no RCU reader still sees chan before it is freed below. */
1531 synchronize_rcu();
1533 /* Prevent new hci_chan's to be created for this hci_conn */
1534 set_bit(HCI_CONN_DROP, &conn->flags);
1536 hci_conn_put(conn);
1538 skb_queue_purge(&chan->data_q);
1539 kfree(chan);
/* Delete every channel hanging off @conn. */
1542 void hci_chan_list_flush(struct hci_conn *conn)
1544 struct hci_chan *chan, *n;
1546 BT_DBG("hcon %p", conn);
/* _safe iteration: hci_chan_del() unlinks chan from the list. */
1548 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1549 hci_chan_del(chan);
/*
 * Find the channel with @handle on a single connection, or NULL.
 * NOTE(review): iterates chan_list with the plain (non-RCU) list
 * walker; the visible caller hci_chan_lookup_handle() invokes this
 * under rcu_read_lock() — confirm any other callers do the same.
 */
1552 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1553 __u16 handle)
1555 struct hci_chan *hchan;
1557 list_for_each_entry(hchan, &hcon->chan_list, list) {
1558 if (hchan->handle == handle)
1559 return hchan;
1562 return NULL;
/*
 * Device-wide channel lookup: scan every connection on @hdev for a
 * channel with @handle.  The connection list is traversed under
 * rcu_read_lock().  Returns the matching channel or NULL.
 */
1565 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1567 struct hci_conn_hash *h = &hdev->conn_hash;
1568 struct hci_conn *hcon;
1569 struct hci_chan *hchan = NULL;
1571 rcu_read_lock();
1573 list_for_each_entry_rcu(hcon, &h->list, list) {
1574 hchan = __hci_chan_lookup_handle(hcon, handle);
1575 if (hchan)
1576 break;
1579 rcu_read_unlock();
1581 return hchan;