/*
 * net/bluetooth/hci_conn.c — Linux kernel Bluetooth HCI connection handling.
 * Snapshot from tag 'fixes-for-v3.19-rc6' (blob fe18825cc8a47ffba031dd1239aa98a7cea8a7cf).
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI connection handling. */
27 #include <linux/export.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
33 #include "smp.h"
34 #include "a2mp.h"
/* One parameter set for a synchronous (SCO/eSCO) connection attempt.
 * The fields are copied verbatim into struct hci_cp_setup_sync_conn
 * by hci_setup_sync().
 */
struct sco_param {
	u16 pkt_type;		/* allowed packet types bitmask */
	u16 max_latency;	/* maximum latency (0xffff = don't care) */
	u8  retrans_effort;	/* retransmission effort (0xff = don't care) */
};
/* eSCO/CVSD parameter fallback table; hci_setup_sync() tries these in
 * order, indexed by conn->attempt - 1.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007, 0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0x01 }, /* D0 */
};
/* SCO/CVSD parameter fallback table for controllers without eSCO support. */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0xff }, /* D0 */
};
/* eSCO parameter fallback table for transparent (mSBC) air mode. */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008, 0x02 }, /* T1 */
};
/* Ask the controller to abort the currently pending LE connection attempt. */
static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
/* Initiate an outgoing BR/EDR ACL connection to conn->dst.
 * Seeds paging parameters (page scan mode, clock offset) from the
 * inquiry cache when a fresh entry is available, which speeds up paging.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode    = ie->data.pscan_mode;
			/* Bit 15 set means the clock offset is valid */
			cp.clock_offset  = ie->data.clock_offset |
					   cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the remote side to request a role switch only if we are
	 * capable and not configured to insist on the master role.
	 */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
/* Abort a pending outgoing ACL connection attempt. The cancel command
 * only exists since Bluetooth 1.2, so older controllers are skipped.
 */
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
121 static void hci_reject_sco(struct hci_conn *conn)
123 struct hci_cp_reject_sync_conn_req cp;
125 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
126 bacpy(&cp.bdaddr, &conn->dst);
128 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
/* Request disconnection of an established link.
 * Returns the result of sending HCI_OP_DISCONNECT (negative errno on
 * failure to queue the command).
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	/* When we are master of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset. Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
/* Tear down an AMP physical link; the disconnect reason is obtained
 * from the protocol layer via hci_proto_disconn_ind().
 */
static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}
/* Add a legacy SCO link on top of the ACL connection identified by
 * @handle (used when the controller does not support eSCO).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
/* Set up an eSCO/SCO link on top of the ACL connection identified by
 * @handle. Each call consumes the next entry of the parameter fallback
 * table for the negotiated air mode (conn->attempt is the 1-based
 * index). Returns false when the table is exhausted, the air mode is
 * unsupported, or the HCI command could not be sent.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);

	cp.tx_bandwidth  = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth  = cpu_to_le32(0x00001f40);
	cp.voice_setting = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (lmp_esco_capable(conn->link)) {
			if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
/* Send an LE connection parameter update and remember the new values in
 * the stored connection parameters, if any exist for this peer.
 * Returns 0x01 when stored parameters were updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
/* Start LE link-layer encryption using the given Long Term Key together
 * with its EDIV and Rand values (master-side LE Start Encryption).
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
/* Bring up the SCO/eSCO link attached to an ACL connection once the ACL
 * setup completed with @status (0 = success). On failure the pending
 * SCO connection is notified and deleted.
 *
 * Device _must_ be locked.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
/* Delayed work run when a connection's disconnect timeout expires.
 * Depending on the connection state it cancels a pending connection
 * attempt, rejects an incoming SCO request, or disconnects an
 * established link. Does nothing while the connection is still in use
 * (refcnt > 0).
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);

			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
/* Idle-timer work: put an inactive ACL link into sniff mode (and set up
 * sniff subrating first when both sides support it) to save power.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
/* Delayed work that auto-accepts a pending user confirmation request
 * for this connection by replying with the peer's address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
/* LE connection establishment timeout: either stop directed advertising
 * (slave role) and fail the connection, or cancel the outgoing LE
 * connection attempt (master role).
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}
/* Allocate and initialize a new hci_conn object of the given @type for
 * peer @dst, register it in the device's connection hash and take a
 * reference on @hdev. Returns NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;	/* unknown until remote tells us */
	conn->key_type = 0xff;		/* no link key yet */
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Pick the initial packet types supported for this link type */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
/* Tear down a connection object: cancel its pending work, detach any
 * linked SCO/ACL peer, return unacked packet credits to the device,
 * remove it from the connection hash and drop the references taken in
 * hci_conn_add(). Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE links share the ACL buffer pool when the controller
		 * has no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}
/* Find the BR/EDR controller to use for reaching @dst, optionally
 * pinned to the local address @src (BDADDR_ANY means "any source").
 * Returns a reference-counted hci_dev or NULL when no suitable
 * controller is up.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
/* Handle a failed LE connection attempt: release any pending-connect
 * parameter reference, notify userspace and the protocol layers, delete
 * the connection, and restore background scanning/advertising.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
/* HCI request completion callback for LE connection creation; on error
 * it looks up the connection still in BT_CONNECT state and fails it.
 */
static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_conn *conn;

	if (status == 0)
		return;

	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
	       status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn)
		goto done;

	hci_le_conn_failed(conn, status);

done:
	hci_dev_unlock(hdev);
}
/* Append an LE Create Connection command for @conn to the HCI request
 * @req and move the connection into BT_CONNECT state. Bails out without
 * queueing when a suitable own address cannot be set up.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an non-resolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}
/* Append commands to @req that start directed advertising towards
 * conn->dst (used for slave-role LE connection establishment) and move
 * the connection into BT_CONNECT state.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
/* Establish (or reuse) an LE connection to @dst.
 *
 * Returns the held hci_conn on success, or an ERR_PTR:
 *   -EBUSY   another LE connection attempt is already in progress, or
 *            directed advertising was requested while actively scanning
 *   -ENOMEM  connection object allocation failed
 *   other    error from running the HCI request
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst, role);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}
/* Establish (or reuse) a BR/EDR ACL connection to @dst with the given
 * security requirements. Returns a held hci_conn or an ERR_PTR
 * (-EOPNOTSUPP when BR/EDR is disabled, -ENOMEM on allocation failure).
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-EOPNOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	/* Only kick off paging if the connection isn't already underway */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
/* Establish a SCO/eSCO connection of @type to @dst, creating the
 * underlying ACL link first if needed. SCO setup is deferred when a
 * mode change on the ACL is still pending. Returns a held hci_conn for
 * the SCO link or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
920 /* Check link security requirement */
921 int hci_conn_check_link_mode(struct hci_conn *conn)
923 BT_DBG("hcon %p", conn);
925 /* In Secure Connections Only mode, it is required that Secure
926 * Connections is used and the link is encrypted with AES-CCM
927 * using a P-256 authenticated combination key.
929 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
930 if (!hci_conn_sc_enabled(conn) ||
931 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
932 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
933 return 0;
936 if (hci_conn_ssp_enabled(conn) &&
937 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
938 return 0;
940 return 1;
/* Authenticate remote device.
 * Returns 1 when the link is already authenticated at a sufficient
 * level, 0 when an authentication request was (or is already) pending.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
/* Encrypt the link (no-op if an encryption change is already pending). */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
/* Enable security.
 * Raise the link's security to @sec_level, authenticating and/or
 * encrypting as needed depending on the stored link key type.
 * Returns 1 when the requirement is already satisfied, 0 when an
 * authentication/encryption procedure was started (completion is
 * signalled asynchronously).
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1064 /* Check secure link requirement */
1065 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1067 BT_DBG("hcon %p", conn);
1069 /* Accept if non-secure or higher security level is required */
1070 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1071 return 1;
1073 /* Accept if secure or higher security level is already present */
1074 if (conn->sec_level == BT_SECURITY_HIGH ||
1075 conn->sec_level == BT_SECURITY_FIPS)
1076 return 1;
1078 /* Reject not secure link */
1079 return 0;
1081 EXPORT_SYMBOL(hci_conn_check_secure);
/* Change link key.
 * Requests a link key change unless an authentication procedure is
 * already pending. Always returns 0 (the result arrives via events).
 */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}
/* Switch role.
 * Returns 1 when the connection already has the requested role; 0 when
 * a role switch was requested (or one is already pending).
 */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);
/* Enter active mode.
 * Exit sniff mode when the link must be active (power save disabled or
 * @force_active set), then re-arm the idle timer that will put the link
 * back into sniff mode after hdev->idle_timeout.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connections on the device, notifying the protocol layers
 * with a local-host-terminated reason before deleting each one.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe variant: hci_conn_del() unlinks the entry we iterate over */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}
1158 /* Check pending connect attempts */
1159 void hci_conn_check_pending(struct hci_dev *hdev)
1161 struct hci_conn *conn;
1163 BT_DBG("hdev %s", hdev->name);
1165 hci_dev_lock(hdev);
1167 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1168 if (conn)
1169 hci_acl_create_connection(conn);
1171 hci_dev_unlock(hdev);
1174 static u32 get_link_mode(struct hci_conn *conn)
1176 u32 link_mode = 0;
1178 if (conn->role == HCI_ROLE_MASTER)
1179 link_mode |= HCI_LM_MASTER;
1181 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1182 link_mode |= HCI_LM_ENCRYPT;
1184 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1185 link_mode |= HCI_LM_AUTH;
1187 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1188 link_mode |= HCI_LM_SECURE;
1190 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1191 link_mode |= HCI_LM_FIPS;
1193 return link_mode;
1196 int hci_get_conn_list(void __user *arg)
1198 struct hci_conn *c;
1199 struct hci_conn_list_req req, *cl;
1200 struct hci_conn_info *ci;
1201 struct hci_dev *hdev;
1202 int n = 0, size, err;
1204 if (copy_from_user(&req, arg, sizeof(req)))
1205 return -EFAULT;
1207 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1208 return -EINVAL;
1210 size = sizeof(req) + req.conn_num * sizeof(*ci);
1212 cl = kmalloc(size, GFP_KERNEL);
1213 if (!cl)
1214 return -ENOMEM;
1216 hdev = hci_dev_get(req.dev_id);
1217 if (!hdev) {
1218 kfree(cl);
1219 return -ENODEV;
1222 ci = cl->conn_info;
1224 hci_dev_lock(hdev);
1225 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1226 bacpy(&(ci + n)->bdaddr, &c->dst);
1227 (ci + n)->handle = c->handle;
1228 (ci + n)->type = c->type;
1229 (ci + n)->out = c->out;
1230 (ci + n)->state = c->state;
1231 (ci + n)->link_mode = get_link_mode(c);
1232 if (++n >= req.conn_num)
1233 break;
1235 hci_dev_unlock(hdev);
1237 cl->dev_id = hdev->id;
1238 cl->conn_num = n;
1239 size = sizeof(req) + n * sizeof(*ci);
1241 hci_dev_put(hdev);
1243 err = copy_to_user(arg, cl, size);
1244 kfree(cl);
1246 return err ? -EFAULT : 0;
1249 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1251 struct hci_conn_info_req req;
1252 struct hci_conn_info ci;
1253 struct hci_conn *conn;
1254 char __user *ptr = arg + sizeof(req);
1256 if (copy_from_user(&req, arg, sizeof(req)))
1257 return -EFAULT;
1259 hci_dev_lock(hdev);
1260 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1261 if (conn) {
1262 bacpy(&ci.bdaddr, &conn->dst);
1263 ci.handle = conn->handle;
1264 ci.type = conn->type;
1265 ci.out = conn->out;
1266 ci.state = conn->state;
1267 ci.link_mode = get_link_mode(conn);
1269 hci_dev_unlock(hdev);
1271 if (!conn)
1272 return -ENOENT;
1274 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1277 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1279 struct hci_auth_info_req req;
1280 struct hci_conn *conn;
1282 if (copy_from_user(&req, arg, sizeof(req)))
1283 return -EFAULT;
1285 hci_dev_lock(hdev);
1286 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1287 if (conn)
1288 req.type = conn->auth_type;
1289 hci_dev_unlock(hdev);
1291 if (!conn)
1292 return -ENOENT;
1294 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1297 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1299 struct hci_dev *hdev = conn->hdev;
1300 struct hci_chan *chan;
1302 BT_DBG("%s hcon %p", hdev->name, conn);
1304 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1305 BT_DBG("Refusing to create new hci_chan");
1306 return NULL;
1309 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1310 if (!chan)
1311 return NULL;
1313 chan->conn = hci_conn_get(conn);
1314 skb_queue_head_init(&chan->data_q);
1315 chan->state = BT_CONNECTED;
1317 list_add_rcu(&chan->list, &conn->chan_list);
1319 return chan;
1322 void hci_chan_del(struct hci_chan *chan)
1324 struct hci_conn *conn = chan->conn;
1325 struct hci_dev *hdev = conn->hdev;
1327 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1329 list_del_rcu(&chan->list);
1331 synchronize_rcu();
1333 /* Prevent new hci_chan's to be created for this hci_conn */
1334 set_bit(HCI_CONN_DROP, &conn->flags);
1336 hci_conn_put(conn);
1338 skb_queue_purge(&chan->data_q);
1339 kfree(chan);
1342 void hci_chan_list_flush(struct hci_conn *conn)
1344 struct hci_chan *chan, *n;
1346 BT_DBG("hcon %p", conn);
1348 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1349 hci_chan_del(chan);
1352 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1353 __u16 handle)
1355 struct hci_chan *hchan;
1357 list_for_each_entry(hchan, &hcon->chan_list, list) {
1358 if (hchan->handle == handle)
1359 return hchan;
1362 return NULL;
1365 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1367 struct hci_conn_hash *h = &hdev->conn_hash;
1368 struct hci_conn *hcon;
1369 struct hci_chan *hchan = NULL;
1371 rcu_read_lock();
1373 list_for_each_entry_rcu(hcon, &h->list, list) {
1374 hchan = __hci_chan_lookup_handle(hcon, handle);
1375 if (hchan)
1376 break;
1379 rcu_read_unlock();
1381 return hchan;