/* net/bluetooth/hci_conn.c */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI connection handling. */
27 #include <linux/export.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
33 #include "smp.h"
34 #include "a2mp.h"
36 struct sco_param {
37 u16 pkt_type;
38 u16 max_latency;
39 u8 retrans_effort;
42 static const struct sco_param esco_param_cvsd[] = {
43 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
44 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
45 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
46 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
47 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
50 static const struct sco_param sco_param_cvsd[] = {
51 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
52 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
55 static const struct sco_param esco_param_msbc[] = {
56 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
57 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
60 static void hci_le_create_connection_cancel(struct hci_conn *conn)
62 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
65 static void hci_acl_create_connection(struct hci_conn *conn)
67 struct hci_dev *hdev = conn->hdev;
68 struct inquiry_entry *ie;
69 struct hci_cp_create_conn cp;
71 BT_DBG("hcon %p", conn);
73 conn->state = BT_CONNECT;
74 conn->out = true;
75 conn->role = HCI_ROLE_MASTER;
77 conn->attempt++;
79 conn->link_policy = hdev->link_policy;
81 memset(&cp, 0, sizeof(cp));
82 bacpy(&cp.bdaddr, &conn->dst);
83 cp.pscan_rep_mode = 0x02;
85 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
86 if (ie) {
87 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
88 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
89 cp.pscan_mode = ie->data.pscan_mode;
90 cp.clock_offset = ie->data.clock_offset |
91 cpu_to_le16(0x8000);
94 memcpy(conn->dev_class, ie->data.dev_class, 3);
95 if (ie->data.ssp_mode > 0)
96 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
99 cp.pkt_type = cpu_to_le16(conn->pkt_type);
100 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
101 cp.role_switch = 0x01;
102 else
103 cp.role_switch = 0x00;
105 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
108 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
110 struct hci_cp_create_conn_cancel cp;
112 BT_DBG("hcon %p", conn);
114 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
115 return;
117 bacpy(&cp.bdaddr, &conn->dst);
118 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
121 static void hci_reject_sco(struct hci_conn *conn)
123 struct hci_cp_reject_sync_conn_req cp;
125 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
126 bacpy(&cp.bdaddr, &conn->dst);
128 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
131 int hci_disconnect(struct hci_conn *conn, __u8 reason)
133 struct hci_cp_disconnect cp;
135 BT_DBG("hcon %p", conn);
137 /* When we are master of an established connection and it enters
138 * the disconnect timeout, then go ahead and try to read the
139 * current clock offset. Processing of the result is done
140 * within the event handling and hci_clock_offset_evt function.
142 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
143 struct hci_dev *hdev = conn->hdev;
144 struct hci_cp_read_clock_offset cp;
146 cp.handle = cpu_to_le16(conn->handle);
147 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(cp), &cp);
150 conn->state = BT_DISCONN;
152 cp.handle = cpu_to_le16(conn->handle);
153 cp.reason = reason;
154 return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
157 static void hci_amp_disconn(struct hci_conn *conn)
159 struct hci_cp_disconn_phy_link cp;
161 BT_DBG("hcon %p", conn);
163 conn->state = BT_DISCONN;
165 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
166 cp.reason = hci_proto_disconn_ind(conn);
167 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
168 sizeof(cp), &cp);
171 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
173 struct hci_dev *hdev = conn->hdev;
174 struct hci_cp_add_sco cp;
176 BT_DBG("hcon %p", conn);
178 conn->state = BT_CONNECT;
179 conn->out = true;
181 conn->attempt++;
183 cp.handle = cpu_to_le16(handle);
184 cp.pkt_type = cpu_to_le16(conn->pkt_type);
186 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
189 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
191 struct hci_dev *hdev = conn->hdev;
192 struct hci_cp_setup_sync_conn cp;
193 const struct sco_param *param;
195 BT_DBG("hcon %p", conn);
197 conn->state = BT_CONNECT;
198 conn->out = true;
200 conn->attempt++;
202 cp.handle = cpu_to_le16(handle);
204 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
205 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
206 cp.voice_setting = cpu_to_le16(conn->setting);
208 switch (conn->setting & SCO_AIRMODE_MASK) {
209 case SCO_AIRMODE_TRANSP:
210 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
211 return false;
212 param = &esco_param_msbc[conn->attempt - 1];
213 break;
214 case SCO_AIRMODE_CVSD:
215 if (lmp_esco_capable(conn->link)) {
216 if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
217 return false;
218 param = &esco_param_cvsd[conn->attempt - 1];
219 } else {
220 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
221 return false;
222 param = &sco_param_cvsd[conn->attempt - 1];
224 break;
225 default:
226 return false;
229 cp.retrans_effort = param->retrans_effort;
230 cp.pkt_type = __cpu_to_le16(param->pkt_type);
231 cp.max_latency = __cpu_to_le16(param->max_latency);
233 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
234 return false;
236 return true;
239 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
240 u16 to_multiplier)
242 struct hci_dev *hdev = conn->hdev;
243 struct hci_conn_params *params;
244 struct hci_cp_le_conn_update cp;
246 hci_dev_lock(hdev);
248 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
249 if (params) {
250 params->conn_min_interval = min;
251 params->conn_max_interval = max;
252 params->conn_latency = latency;
253 params->supervision_timeout = to_multiplier;
256 hci_dev_unlock(hdev);
258 memset(&cp, 0, sizeof(cp));
259 cp.handle = cpu_to_le16(conn->handle);
260 cp.conn_interval_min = cpu_to_le16(min);
261 cp.conn_interval_max = cpu_to_le16(max);
262 cp.conn_latency = cpu_to_le16(latency);
263 cp.supervision_timeout = cpu_to_le16(to_multiplier);
264 cp.min_ce_len = cpu_to_le16(0x0000);
265 cp.max_ce_len = cpu_to_le16(0x0000);
267 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
269 if (params)
270 return 0x01;
272 return 0x00;
275 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
276 __u8 ltk[16])
278 struct hci_dev *hdev = conn->hdev;
279 struct hci_cp_le_start_enc cp;
281 BT_DBG("hcon %p", conn);
283 memset(&cp, 0, sizeof(cp));
285 cp.handle = cpu_to_le16(conn->handle);
286 cp.rand = rand;
287 cp.ediv = ediv;
288 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
290 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
293 /* Device _must_ be locked */
294 void hci_sco_setup(struct hci_conn *conn, __u8 status)
296 struct hci_conn *sco = conn->link;
298 if (!sco)
299 return;
301 BT_DBG("hcon %p", conn);
303 if (!status) {
304 if (lmp_esco_capable(conn->hdev))
305 hci_setup_sync(sco, conn->handle);
306 else
307 hci_add_sco(sco, conn->handle);
308 } else {
309 hci_proto_connect_cfm(sco, status);
310 hci_conn_del(sco);
314 static void hci_conn_timeout(struct work_struct *work)
316 struct hci_conn *conn = container_of(work, struct hci_conn,
317 disc_work.work);
318 int refcnt = atomic_read(&conn->refcnt);
320 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
322 WARN_ON(refcnt < 0);
324 /* FIXME: It was observed that in pairing failed scenario, refcnt
325 * drops below 0. Probably this is because l2cap_conn_del calls
326 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
327 * dropped. After that loop hci_chan_del is called which also drops
328 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
329 * otherwise drop it.
331 if (refcnt > 0)
332 return;
334 switch (conn->state) {
335 case BT_CONNECT:
336 case BT_CONNECT2:
337 if (conn->out) {
338 if (conn->type == ACL_LINK)
339 hci_acl_create_connection_cancel(conn);
340 else if (conn->type == LE_LINK)
341 hci_le_create_connection_cancel(conn);
342 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
343 hci_reject_sco(conn);
345 break;
346 case BT_CONFIG:
347 case BT_CONNECTED:
348 if (conn->type == AMP_LINK) {
349 hci_amp_disconn(conn);
350 } else {
351 __u8 reason = hci_proto_disconn_ind(conn);
352 hci_disconnect(conn, reason);
354 break;
355 default:
356 conn->state = BT_CLOSED;
357 break;
361 /* Enter sniff mode */
362 static void hci_conn_idle(struct work_struct *work)
364 struct hci_conn *conn = container_of(work, struct hci_conn,
365 idle_work.work);
366 struct hci_dev *hdev = conn->hdev;
368 BT_DBG("hcon %p mode %d", conn, conn->mode);
370 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
371 return;
373 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
374 return;
376 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
377 struct hci_cp_sniff_subrate cp;
378 cp.handle = cpu_to_le16(conn->handle);
379 cp.max_latency = cpu_to_le16(0);
380 cp.min_remote_timeout = cpu_to_le16(0);
381 cp.min_local_timeout = cpu_to_le16(0);
382 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
385 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
386 struct hci_cp_sniff_mode cp;
387 cp.handle = cpu_to_le16(conn->handle);
388 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
389 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
390 cp.attempt = cpu_to_le16(4);
391 cp.timeout = cpu_to_le16(1);
392 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
396 static void hci_conn_auto_accept(struct work_struct *work)
398 struct hci_conn *conn = container_of(work, struct hci_conn,
399 auto_accept_work.work);
401 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
402 &conn->dst);
405 static void le_conn_timeout(struct work_struct *work)
407 struct hci_conn *conn = container_of(work, struct hci_conn,
408 le_conn_timeout.work);
409 struct hci_dev *hdev = conn->hdev;
411 BT_DBG("");
413 /* We could end up here due to having done directed advertising,
414 * so clean up the state if necessary. This should however only
415 * happen with broken hardware or if low duty cycle was used
416 * (which doesn't have a timeout of its own).
418 if (conn->role == HCI_ROLE_SLAVE) {
419 u8 enable = 0x00;
420 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
421 &enable);
422 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
423 return;
426 hci_le_create_connection_cancel(conn);
429 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
430 u8 role)
432 struct hci_conn *conn;
434 BT_DBG("%s dst %pMR", hdev->name, dst);
436 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
437 if (!conn)
438 return NULL;
440 bacpy(&conn->dst, dst);
441 bacpy(&conn->src, &hdev->bdaddr);
442 conn->hdev = hdev;
443 conn->type = type;
444 conn->role = role;
445 conn->mode = HCI_CM_ACTIVE;
446 conn->state = BT_OPEN;
447 conn->auth_type = HCI_AT_GENERAL_BONDING;
448 conn->io_capability = hdev->io_capability;
449 conn->remote_auth = 0xff;
450 conn->key_type = 0xff;
451 conn->tx_power = HCI_TX_POWER_INVALID;
452 conn->max_tx_power = HCI_TX_POWER_INVALID;
454 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
455 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
457 if (conn->role == HCI_ROLE_MASTER)
458 conn->out = true;
460 switch (type) {
461 case ACL_LINK:
462 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
463 break;
464 case LE_LINK:
465 /* conn->src should reflect the local identity address */
466 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
467 break;
468 case SCO_LINK:
469 if (lmp_esco_capable(hdev))
470 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
471 (hdev->esco_type & EDR_ESCO_MASK);
472 else
473 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
474 break;
475 case ESCO_LINK:
476 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
477 break;
480 skb_queue_head_init(&conn->data_q);
482 INIT_LIST_HEAD(&conn->chan_list);
484 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
485 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
486 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
487 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
489 atomic_set(&conn->refcnt, 0);
491 hci_dev_hold(hdev);
493 hci_conn_hash_add(hdev, conn);
494 if (hdev->notify)
495 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
497 hci_conn_init_sysfs(conn);
499 return conn;
502 int hci_conn_del(struct hci_conn *conn)
504 struct hci_dev *hdev = conn->hdev;
506 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
508 cancel_delayed_work_sync(&conn->disc_work);
509 cancel_delayed_work_sync(&conn->auto_accept_work);
510 cancel_delayed_work_sync(&conn->idle_work);
512 if (conn->type == ACL_LINK) {
513 struct hci_conn *sco = conn->link;
514 if (sco)
515 sco->link = NULL;
517 /* Unacked frames */
518 hdev->acl_cnt += conn->sent;
519 } else if (conn->type == LE_LINK) {
520 cancel_delayed_work(&conn->le_conn_timeout);
522 if (hdev->le_pkts)
523 hdev->le_cnt += conn->sent;
524 else
525 hdev->acl_cnt += conn->sent;
526 } else {
527 struct hci_conn *acl = conn->link;
528 if (acl) {
529 acl->link = NULL;
530 hci_conn_drop(acl);
534 hci_chan_list_flush(conn);
536 if (conn->amp_mgr)
537 amp_mgr_put(conn->amp_mgr);
539 hci_conn_hash_del(hdev, conn);
540 if (hdev->notify)
541 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
543 skb_queue_purge(&conn->data_q);
545 hci_conn_del_sysfs(conn);
547 hci_dev_put(hdev);
549 hci_conn_put(conn);
551 return 0;
554 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
556 int use_src = bacmp(src, BDADDR_ANY);
557 struct hci_dev *hdev = NULL, *d;
559 BT_DBG("%pMR -> %pMR", src, dst);
561 read_lock(&hci_dev_list_lock);
563 list_for_each_entry(d, &hci_dev_list, list) {
564 if (!test_bit(HCI_UP, &d->flags) ||
565 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
566 d->dev_type != HCI_BREDR)
567 continue;
569 /* Simple routing:
570 * No source address - find interface with bdaddr != dst
571 * Source address - find interface with bdaddr == src
574 if (use_src) {
575 if (!bacmp(&d->bdaddr, src)) {
576 hdev = d; break;
578 } else {
579 if (bacmp(&d->bdaddr, dst)) {
580 hdev = d; break;
585 if (hdev)
586 hdev = hci_dev_hold(hdev);
588 read_unlock(&hci_dev_list_lock);
589 return hdev;
591 EXPORT_SYMBOL(hci_get_route);
593 /* This function requires the caller holds hdev->lock */
594 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
596 struct hci_dev *hdev = conn->hdev;
597 struct hci_conn_params *params;
599 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
600 conn->dst_type);
601 if (params && params->conn) {
602 hci_conn_drop(params->conn);
603 hci_conn_put(params->conn);
604 params->conn = NULL;
607 conn->state = BT_CLOSED;
609 mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
610 status);
612 hci_proto_connect_cfm(conn, status);
614 hci_conn_del(conn);
616 /* Since we may have temporarily stopped the background scanning in
617 * favor of connection establishment, we should restart it.
619 hci_update_background_scan(hdev);
621 /* Re-enable advertising in case this was a failed connection
622 * attempt as a peripheral.
624 mgmt_reenable_advertising(hdev);
627 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
629 struct hci_conn *conn;
631 if (status == 0)
632 return;
634 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
635 status);
637 hci_dev_lock(hdev);
639 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
640 if (!conn)
641 goto done;
643 hci_le_conn_failed(conn, status);
645 done:
646 hci_dev_unlock(hdev);
649 static void hci_req_add_le_create_conn(struct hci_request *req,
650 struct hci_conn *conn)
652 struct hci_cp_le_create_conn cp;
653 struct hci_dev *hdev = conn->hdev;
654 u8 own_addr_type;
656 memset(&cp, 0, sizeof(cp));
658 /* Update random address, but set require_privacy to false so
659 * that we never connect with an unresolvable address.
661 if (hci_update_random_address(req, false, &own_addr_type))
662 return;
664 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
665 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
666 bacpy(&cp.peer_addr, &conn->dst);
667 cp.peer_addr_type = conn->dst_type;
668 cp.own_address_type = own_addr_type;
669 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
670 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
671 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
672 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
673 cp.min_ce_len = cpu_to_le16(0x0000);
674 cp.max_ce_len = cpu_to_le16(0x0000);
676 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
678 conn->state = BT_CONNECT;
681 static void hci_req_directed_advertising(struct hci_request *req,
682 struct hci_conn *conn)
684 struct hci_dev *hdev = req->hdev;
685 struct hci_cp_le_set_adv_param cp;
686 u8 own_addr_type;
687 u8 enable;
689 /* Clear the HCI_LE_ADV bit temporarily so that the
690 * hci_update_random_address knows that it's safe to go ahead
691 * and write a new random address. The flag will be set back on
692 * as soon as the SET_ADV_ENABLE HCI command completes.
694 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
696 /* Set require_privacy to false so that the remote device has a
697 * chance of identifying us.
699 if (hci_update_random_address(req, false, &own_addr_type) < 0)
700 return;
702 memset(&cp, 0, sizeof(cp));
703 cp.type = LE_ADV_DIRECT_IND;
704 cp.own_address_type = own_addr_type;
705 cp.direct_addr_type = conn->dst_type;
706 bacpy(&cp.direct_addr, &conn->dst);
707 cp.channel_map = hdev->le_adv_channel_map;
709 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
711 enable = 0x01;
712 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
714 conn->state = BT_CONNECT;
717 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
718 u8 dst_type, u8 sec_level, u16 conn_timeout,
719 u8 role)
721 struct hci_conn_params *params;
722 struct hci_conn *conn;
723 struct smp_irk *irk;
724 struct hci_request req;
725 int err;
727 /* Some devices send ATT messages as soon as the physical link is
728 * established. To be able to handle these ATT messages, the user-
729 * space first establishes the connection and then starts the pairing
730 * process.
732 * So if a hci_conn object already exists for the following connection
733 * attempt, we simply update pending_sec_level and auth_type fields
734 * and return the object found.
736 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
737 if (conn) {
738 conn->pending_sec_level = sec_level;
739 goto done;
742 /* Since the controller supports only one LE connection attempt at a
743 * time, we return -EBUSY if there is any connection attempt running.
745 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
746 if (conn)
747 return ERR_PTR(-EBUSY);
749 /* When given an identity address with existing identity
750 * resolving key, the connection needs to be established
751 * to a resolvable random address.
753 * This uses the cached random resolvable address from
754 * a previous scan. When no cached address is available,
755 * try connecting to the identity address instead.
757 * Storing the resolvable random address is required here
758 * to handle connection failures. The address will later
759 * be resolved back into the original identity address
760 * from the connect request.
762 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
763 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
764 dst = &irk->rpa;
765 dst_type = ADDR_LE_DEV_RANDOM;
768 conn = hci_conn_add(hdev, LE_LINK, dst, role);
769 if (!conn)
770 return ERR_PTR(-ENOMEM);
772 conn->dst_type = dst_type;
773 conn->sec_level = BT_SECURITY_LOW;
774 conn->pending_sec_level = sec_level;
775 conn->conn_timeout = conn_timeout;
777 hci_req_init(&req, hdev);
779 /* Disable advertising if we're active. For master role
780 * connections most controllers will refuse to connect if
781 * advertising is enabled, and for slave role connections we
782 * anyway have to disable it in order to start directed
783 * advertising.
785 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
786 u8 enable = 0x00;
787 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
788 &enable);
791 /* If requested to connect as slave use directed advertising */
792 if (conn->role == HCI_ROLE_SLAVE) {
793 /* If we're active scanning most controllers are unable
794 * to initiate advertising. Simply reject the attempt.
796 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
797 hdev->le_scan_type == LE_SCAN_ACTIVE) {
798 skb_queue_purge(&req.cmd_q);
799 hci_conn_del(conn);
800 return ERR_PTR(-EBUSY);
803 hci_req_directed_advertising(&req, conn);
804 goto create_conn;
807 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
808 if (params) {
809 conn->le_conn_min_interval = params->conn_min_interval;
810 conn->le_conn_max_interval = params->conn_max_interval;
811 conn->le_conn_latency = params->conn_latency;
812 conn->le_supv_timeout = params->supervision_timeout;
813 } else {
814 conn->le_conn_min_interval = hdev->le_conn_min_interval;
815 conn->le_conn_max_interval = hdev->le_conn_max_interval;
816 conn->le_conn_latency = hdev->le_conn_latency;
817 conn->le_supv_timeout = hdev->le_supv_timeout;
820 /* If controller is scanning, we stop it since some controllers are
821 * not able to scan and connect at the same time. Also set the
822 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
823 * handler for scan disabling knows to set the correct discovery
824 * state.
826 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
827 hci_req_add_le_scan_disable(&req);
828 set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
831 hci_req_add_le_create_conn(&req, conn);
833 create_conn:
834 err = hci_req_run(&req, create_le_conn_complete);
835 if (err) {
836 hci_conn_del(conn);
837 return ERR_PTR(err);
840 done:
841 hci_conn_hold(conn);
842 return conn;
845 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
846 u8 sec_level, u8 auth_type)
848 struct hci_conn *acl;
850 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
851 return ERR_PTR(-EOPNOTSUPP);
853 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
854 if (!acl) {
855 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
856 if (!acl)
857 return ERR_PTR(-ENOMEM);
860 hci_conn_hold(acl);
862 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
863 acl->sec_level = BT_SECURITY_LOW;
864 acl->pending_sec_level = sec_level;
865 acl->auth_type = auth_type;
866 hci_acl_create_connection(acl);
869 return acl;
872 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
873 __u16 setting)
875 struct hci_conn *acl;
876 struct hci_conn *sco;
878 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
879 if (IS_ERR(acl))
880 return acl;
882 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
883 if (!sco) {
884 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
885 if (!sco) {
886 hci_conn_drop(acl);
887 return ERR_PTR(-ENOMEM);
891 acl->link = sco;
892 sco->link = acl;
894 hci_conn_hold(sco);
896 sco->setting = setting;
898 if (acl->state == BT_CONNECTED &&
899 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
900 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
901 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
903 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
904 /* defer SCO setup until mode change completed */
905 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
906 return sco;
909 hci_sco_setup(acl, 0x00);
912 return sco;
915 /* Check link security requirement */
916 int hci_conn_check_link_mode(struct hci_conn *conn)
918 BT_DBG("hcon %p", conn);
920 /* In Secure Connections Only mode, it is required that Secure
921 * Connections is used and the link is encrypted with AES-CCM
922 * using a P-256 authenticated combination key.
924 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
925 if (!hci_conn_sc_enabled(conn) ||
926 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
927 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
928 return 0;
931 if (hci_conn_ssp_enabled(conn) &&
932 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
933 return 0;
935 return 1;
938 /* Authenticate remote device */
939 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
941 BT_DBG("hcon %p", conn);
943 if (conn->pending_sec_level > sec_level)
944 sec_level = conn->pending_sec_level;
946 if (sec_level > conn->sec_level)
947 conn->pending_sec_level = sec_level;
948 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
949 return 1;
951 /* Make sure we preserve an existing MITM requirement*/
952 auth_type |= (conn->auth_type & 0x01);
954 conn->auth_type = auth_type;
956 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
957 struct hci_cp_auth_requested cp;
959 cp.handle = cpu_to_le16(conn->handle);
960 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
961 sizeof(cp), &cp);
963 /* If we're already encrypted set the REAUTH_PEND flag,
964 * otherwise set the ENCRYPT_PEND.
966 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
967 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
968 else
969 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
972 return 0;
975 /* Encrypt the the link */
976 static void hci_conn_encrypt(struct hci_conn *conn)
978 BT_DBG("hcon %p", conn);
980 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
981 struct hci_cp_set_conn_encrypt cp;
982 cp.handle = cpu_to_le16(conn->handle);
983 cp.encrypt = 0x01;
984 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
985 &cp);
989 /* Enable security */
990 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
991 bool initiator)
993 BT_DBG("hcon %p", conn);
995 if (conn->type == LE_LINK)
996 return smp_conn_security(conn, sec_level);
998 /* For sdp we don't need the link key. */
999 if (sec_level == BT_SECURITY_SDP)
1000 return 1;
1002 /* For non 2.1 devices and low security level we don't need the link
1003 key. */
1004 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1005 return 1;
1007 /* For other security levels we need the link key. */
1008 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1009 goto auth;
1011 /* An authenticated FIPS approved combination key has sufficient
1012 * security for security level 4. */
1013 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1014 sec_level == BT_SECURITY_FIPS)
1015 goto encrypt;
1017 /* An authenticated combination key has sufficient security for
1018 security level 3. */
1019 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1020 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1021 sec_level == BT_SECURITY_HIGH)
1022 goto encrypt;
1024 /* An unauthenticated combination key has sufficient security for
1025 security level 1 and 2. */
1026 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1027 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1028 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1029 goto encrypt;
1031 /* A combination key has always sufficient security for the security
1032 levels 1 or 2. High security level requires the combination key
1033 is generated using maximum PIN code length (16).
1034 For pre 2.1 units. */
1035 if (conn->key_type == HCI_LK_COMBINATION &&
1036 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1037 conn->pin_length == 16))
1038 goto encrypt;
1040 auth:
1041 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1042 return 0;
1044 if (initiator)
1045 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1047 if (!hci_conn_auth(conn, sec_level, auth_type))
1048 return 0;
1050 encrypt:
1051 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1052 return 1;
1054 hci_conn_encrypt(conn);
1055 return 0;
1057 EXPORT_SYMBOL(hci_conn_security);
1059 /* Check secure link requirement */
1060 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1062 BT_DBG("hcon %p", conn);
1064 /* Accept if non-secure or higher security level is required */
1065 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1066 return 1;
1068 /* Accept if secure or higher security level is already present */
1069 if (conn->sec_level == BT_SECURITY_HIGH ||
1070 conn->sec_level == BT_SECURITY_FIPS)
1071 return 1;
1073 /* Reject not secure link */
1074 return 0;
1076 EXPORT_SYMBOL(hci_conn_check_secure);
1078 /* Change link key */
1079 int hci_conn_change_link_key(struct hci_conn *conn)
1081 BT_DBG("hcon %p", conn);
1083 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1084 struct hci_cp_change_conn_link_key cp;
1085 cp.handle = cpu_to_le16(conn->handle);
1086 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1087 sizeof(cp), &cp);
1090 return 0;
1093 /* Switch role */
1094 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1096 BT_DBG("hcon %p", conn);
1098 if (role == conn->role)
1099 return 1;
1101 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1102 struct hci_cp_switch_role cp;
1103 bacpy(&cp.bdaddr, &conn->dst);
1104 cp.role = role;
1105 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1108 return 0;
1110 EXPORT_SYMBOL(hci_conn_switch_role);
1112 /* Enter active mode */
1113 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1115 struct hci_dev *hdev = conn->hdev;
1117 BT_DBG("hcon %p mode %d", conn, conn->mode);
1119 if (conn->mode != HCI_CM_SNIFF)
1120 goto timer;
1122 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1123 goto timer;
1125 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1126 struct hci_cp_exit_sniff_mode cp;
1127 cp.handle = cpu_to_le16(conn->handle);
1128 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1131 timer:
1132 if (hdev->idle_timeout > 0)
1133 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1134 msecs_to_jiffies(hdev->idle_timeout));
1137 /* Drop all connection on the device */
1138 void hci_conn_hash_flush(struct hci_dev *hdev)
1140 struct hci_conn_hash *h = &hdev->conn_hash;
1141 struct hci_conn *c, *n;
1143 BT_DBG("hdev %s", hdev->name);
1145 list_for_each_entry_safe(c, n, &h->list, list) {
1146 c->state = BT_CLOSED;
1148 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1149 hci_conn_del(c);
1153 /* Check pending connect attempts */
1154 void hci_conn_check_pending(struct hci_dev *hdev)
1156 struct hci_conn *conn;
1158 BT_DBG("hdev %s", hdev->name);
1160 hci_dev_lock(hdev);
1162 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1163 if (conn)
1164 hci_acl_create_connection(conn);
1166 hci_dev_unlock(hdev);
1169 static u32 get_link_mode(struct hci_conn *conn)
1171 u32 link_mode = 0;
1173 if (conn->role == HCI_ROLE_MASTER)
1174 link_mode |= HCI_LM_MASTER;
1176 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1177 link_mode |= HCI_LM_ENCRYPT;
1179 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1180 link_mode |= HCI_LM_AUTH;
1182 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1183 link_mode |= HCI_LM_SECURE;
1185 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1186 link_mode |= HCI_LM_FIPS;
1188 return link_mode;
1191 int hci_get_conn_list(void __user *arg)
1193 struct hci_conn *c;
1194 struct hci_conn_list_req req, *cl;
1195 struct hci_conn_info *ci;
1196 struct hci_dev *hdev;
1197 int n = 0, size, err;
1199 if (copy_from_user(&req, arg, sizeof(req)))
1200 return -EFAULT;
1202 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1203 return -EINVAL;
1205 size = sizeof(req) + req.conn_num * sizeof(*ci);
1207 cl = kmalloc(size, GFP_KERNEL);
1208 if (!cl)
1209 return -ENOMEM;
1211 hdev = hci_dev_get(req.dev_id);
1212 if (!hdev) {
1213 kfree(cl);
1214 return -ENODEV;
1217 ci = cl->conn_info;
1219 hci_dev_lock(hdev);
1220 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1221 bacpy(&(ci + n)->bdaddr, &c->dst);
1222 (ci + n)->handle = c->handle;
1223 (ci + n)->type = c->type;
1224 (ci + n)->out = c->out;
1225 (ci + n)->state = c->state;
1226 (ci + n)->link_mode = get_link_mode(c);
1227 if (++n >= req.conn_num)
1228 break;
1230 hci_dev_unlock(hdev);
1232 cl->dev_id = hdev->id;
1233 cl->conn_num = n;
1234 size = sizeof(req) + n * sizeof(*ci);
1236 hci_dev_put(hdev);
1238 err = copy_to_user(arg, cl, size);
1239 kfree(cl);
1241 return err ? -EFAULT : 0;
1244 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1246 struct hci_conn_info_req req;
1247 struct hci_conn_info ci;
1248 struct hci_conn *conn;
1249 char __user *ptr = arg + sizeof(req);
1251 if (copy_from_user(&req, arg, sizeof(req)))
1252 return -EFAULT;
1254 hci_dev_lock(hdev);
1255 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1256 if (conn) {
1257 bacpy(&ci.bdaddr, &conn->dst);
1258 ci.handle = conn->handle;
1259 ci.type = conn->type;
1260 ci.out = conn->out;
1261 ci.state = conn->state;
1262 ci.link_mode = get_link_mode(conn);
1264 hci_dev_unlock(hdev);
1266 if (!conn)
1267 return -ENOENT;
1269 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1272 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1274 struct hci_auth_info_req req;
1275 struct hci_conn *conn;
1277 if (copy_from_user(&req, arg, sizeof(req)))
1278 return -EFAULT;
1280 hci_dev_lock(hdev);
1281 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1282 if (conn)
1283 req.type = conn->auth_type;
1284 hci_dev_unlock(hdev);
1286 if (!conn)
1287 return -ENOENT;
1289 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1292 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1294 struct hci_dev *hdev = conn->hdev;
1295 struct hci_chan *chan;
1297 BT_DBG("%s hcon %p", hdev->name, conn);
1299 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1300 BT_DBG("Refusing to create new hci_chan");
1301 return NULL;
1304 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1305 if (!chan)
1306 return NULL;
1308 chan->conn = hci_conn_get(conn);
1309 skb_queue_head_init(&chan->data_q);
1310 chan->state = BT_CONNECTED;
1312 list_add_rcu(&chan->list, &conn->chan_list);
1314 return chan;
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for all concurrent RCU readers of chan_list to finish
	 * before the channel memory is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the connection reference taken in hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1337 void hci_chan_list_flush(struct hci_conn *conn)
1339 struct hci_chan *chan, *n;
1341 BT_DBG("hcon %p", conn);
1343 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1344 hci_chan_del(chan);
1347 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1348 __u16 handle)
1350 struct hci_chan *hchan;
1352 list_for_each_entry(hchan, &hcon->chan_list, list) {
1353 if (hchan->handle == handle)
1354 return hchan;
1357 return NULL;
1360 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1362 struct hci_conn_hash *h = &hdev->conn_hash;
1363 struct hci_conn *hcon;
1364 struct hci_chan *hchan = NULL;
1366 rcu_read_lock();
1368 list_for_each_entry_rcu(hcon, &h->list, list) {
1369 hchan = __hci_chan_lookup_handle(hcon, handle);
1370 if (hchan)
1371 break;
1374 rcu_read_unlock();
1376 return hchan;