// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Bluetooth support for Intel devices
 *
 *  Copyright (C) 2015  Intel Corporation
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btintel.h"

#define VERSION "0.1"

#define BDADDR_INTEL		(&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
#define RSA_HEADER_LEN		644
#define CSS_HEADER_OFFSET	8
#define ECDSA_OFFSET		644
#define ECDSA_HEADER_LEN	320
int btintel_check_bdaddr(struct hci_dev *hdev)
{
        struct hci_rp_read_bd_addr *bda;
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                int err = PTR_ERR(skb);

                bt_dev_err(hdev, "Reading Intel device address failed (%d)",
                           err);
                return err;
        }

        if (skb->len != sizeof(*bda)) {
                bt_dev_err(hdev, "Intel device address length mismatch");
                kfree_skb(skb);
                return -EIO;
        }

        bda = (struct hci_rp_read_bd_addr *)skb->data;

        /* For some Intel based controllers, the default Bluetooth device
         * address 00:03:19:9E:8B:00 can be found. These controllers are
         * fully operational, but have the danger of duplicate addresses
         * and that in turn can cause problems with Bluetooth operation.
         */
        if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
                bt_dev_err(hdev, "Found Intel default device address (%pMR)",
                           &bda->bdaddr);
                set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
        }

        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
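
/* Enter the Intel manufacturer mode via vendor command 0xfc11 so that
 * restricted operations such as changing the event mask or diagnostic
 * settings can be performed (see the *_mfg wrappers below).
 */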
int btintel_enter_mfg(struct hci_dev *hdev)
{
        static const u8 param[] = { 0x01, 0x00 };
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }
        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_enter_mfg);
int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched)
{
        u8 param[] = { 0x00, 0x00 };
        struct sk_buff *skb;

        /* The 2nd command parameter specifies the manufacturing exit method:
         * 0x00: Just disable the manufacturing mode.
         * 0x01: Disable manufacturing mode and reset with patches deactivated.
         * 0x02: Disable manufacturing mode and reset with patches activated.
         */
        if (reset)
                param[1] |= patched ? 0x02 : 0x01;

        skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }
        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_exit_mfg);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
        struct sk_buff *skb;
        int err;

        skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(hdev, "Changing Intel device address failed (%d)",
                           err);
                return err;
        }
        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
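
/* Toggle the Intel diagnostic mode via vendor command 0xfc43 and update
 * the vendor event mask accordingly; -ENODATA from the controller is
 * treated as success.
 */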
int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
        struct sk_buff *skb;
        u8 param[3];
        int err;

        if (enable) {
                param[0] = 0x03;
                param[1] = 0x03;
                param[2] = 0x03;
        } else {
                param[0] = 0x00;
                param[1] = 0x00;
                param[2] = 0x00;
        }

        skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                if (err == -ENODATA)
                        goto done;
                bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)",
                           err);
                return err;
        }
        kfree_skb(skb);

done:
        btintel_set_event_mask(hdev, enable);
        return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
        int err, ret;

        err = btintel_enter_mfg(hdev);
        if (err)
                return err;

        ret = btintel_set_diag(hdev, enable);

        err = btintel_exit_mfg(hdev, false, false);
        if (err)
                return err;

        return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
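
/* Hardware error handler: reset the controller and dump the exception
 * info string retrieved with vendor command 0xfc22.
 */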
void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
        struct sk_buff *skb;
        u8 type = 0x00;

        bt_dev_err(hdev, "Hardware error 0x%2.2x", code);

        skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
                           PTR_ERR(skb));
                return;
        }
        kfree_skb(skb);

        skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
                           PTR_ERR(skb));
                return;
        }

        if (skb->len != 13) {
                bt_dev_err(hdev, "Exception info size mismatch");
                kfree_skb(skb);
                return;
        }

        bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));

        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(btintel_hw_error);
void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
{
        const char *variant;

        switch (ver->fw_variant) {
        case 0x06:
                variant = "Bootloader";
                break;
        case 0x23:
                variant = "Firmware";
                break;
        default:
                return;
        }

        bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
                    variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
                    ver->fw_build_num, ver->fw_build_ww,
                    2000 + ver->fw_build_yy);
}
EXPORT_SYMBOL_GPL(btintel_version_info);
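
/* Send one logical firmware fragment through the Intel Secure Send
 * command (0xfc09), splitting it into chunks of at most 252 bytes.
 */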
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
                        const void *param)
{
        while (plen > 0) {
                struct sk_buff *skb;
                u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;

                cmd_param[0] = fragment_type;
                memcpy(cmd_param + 1, param, fragment_len);

                skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
                                     cmd_param, HCI_INIT_TIMEOUT);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);

                kfree_skb(skb);

                plen -= fragment_len;
                param += fragment_len;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_secure_send);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
{
        const struct firmware *fw;
        struct sk_buff *skb;
        const u8 *fw_ptr;
        int err;

        err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
                           ddc_name, err);
                return err;
        }

        bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);

        fw_ptr = fw->data;

        /* The DDC file contains one or more DDC structures, each consisting
         * of Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2
         * bytes).
         */
        while (fw->size > fw_ptr - fw->data) {
                u8 cmd_plen = fw_ptr[0] + sizeof(u8);

                skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
                                     HCI_INIT_TIMEOUT);
                if (IS_ERR(skb)) {
                        bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
                                   PTR_ERR(skb));
                        release_firmware(fw);
                        return PTR_ERR(skb);
                }

                fw_ptr += cmd_plen;
                kfree_skb(skb);
        }

        release_firmware(fw);

        bt_dev_info(hdev, "Applying Intel DDC parameters completed");

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
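
/* Program the Intel vendor-specific event mask via command 0xfc52; the
 * debug flag enables additional vendor events used for debugging.
 */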
int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
        u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        struct sk_buff *skb;
        int err;

        if (debug)
                mask[1] |= 0x62;

        skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err);
                return err;
        }
        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
        int err, ret;

        err = btintel_enter_mfg(hdev);
        if (err)
                return err;

        ret = btintel_set_event_mask(hdev, debug);

        err = btintel_exit_mfg(hdev, false, false);
        if (err)
                return err;

        return ret;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        if (skb->len != sizeof(*ver)) {
                bt_dev_err(hdev, "Intel version event size mismatch");
                kfree_skb(skb);
                return -EILSEQ;
        }

        memcpy(ver, skb->data, sizeof(*ver));

        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_version);
void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
{
        const char *variant;

        switch (version->img_type) {
        case 0x01:
                variant = "Bootloader";
                bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
                bt_dev_info(hdev, "Secure boot is %s",
                            version->secure_boot ? "enabled" : "disabled");
                bt_dev_info(hdev, "OTP lock is %s",
                            version->otp_lock ? "enabled" : "disabled");
                bt_dev_info(hdev, "API lock is %s",
                            version->api_lock ? "enabled" : "disabled");
                bt_dev_info(hdev, "Debug lock is %s",
                            version->debug_lock ? "enabled" : "disabled");
                bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
                            version->min_fw_build_nn, version->min_fw_build_cw,
                            2000 + version->min_fw_build_yy);
                break;
        case 0x03:
                variant = "Firmware";
                break;
        default:
                bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type);
                goto done;
        }

        bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
                    2000 + (version->timestamp >> 8), version->timestamp & 0xff,
                    version->build_type, version->build_num);

done:
        return;
}
EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
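
/* Read the TLV-based version information (Intel Read Version with
 * parameter 0xFF) and map each TLV onto the corresponding
 * intel_version_tlv field.
 */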
int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
{
        struct sk_buff *skb;
        const u8 param[1] = { 0xFF };

        if (!version)
                return -EINVAL;

        skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        if (skb->data[0]) {
                bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
                           skb->data[0]);
                kfree_skb(skb);
                return -EIO;
        }

        /* Consume Command Complete Status field */
        skb_pull(skb, 1);

        /* Event parameters contain multiple TLVs. Read each of them
         * and only keep the required data. Also, use the existing legacy
         * version fields like hw_platform, hw_variant, and fw_variant
         * to keep the existing setup flow.
         */
        while (skb->len) {
                struct intel_tlv *tlv;

                tlv = (struct intel_tlv *)skb->data;
                switch (tlv->type) {
                case INTEL_TLV_CNVI_TOP:
                        version->cnvi_top = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_CNVR_TOP:
                        version->cnvr_top = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_CNVI_BT:
                        version->cnvi_bt = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_CNVR_BT:
                        version->cnvr_bt = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_DEV_REV_ID:
                        version->dev_rev_id = get_unaligned_le16(tlv->val);
                        break;
                case INTEL_TLV_IMAGE_TYPE:
                        version->img_type = tlv->val[0];
                        break;
                case INTEL_TLV_TIME_STAMP:
                        version->timestamp = get_unaligned_le16(tlv->val);
                        break;
                case INTEL_TLV_BUILD_TYPE:
                        version->build_type = tlv->val[0];
                        break;
                case INTEL_TLV_BUILD_NUM:
                        version->build_num = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_SECURE_BOOT:
                        version->secure_boot = tlv->val[0];
                        break;
                case INTEL_TLV_OTP_LOCK:
                        version->otp_lock = tlv->val[0];
                        break;
                case INTEL_TLV_API_LOCK:
                        version->api_lock = tlv->val[0];
                        break;
                case INTEL_TLV_DEBUG_LOCK:
                        version->debug_lock = tlv->val[0];
                        break;
                case INTEL_TLV_MIN_FW:
                        version->min_fw_build_nn = tlv->val[0];
                        version->min_fw_build_cw = tlv->val[1];
                        version->min_fw_build_yy = tlv->val[2];
                        break;
                case INTEL_TLV_LIMITED_CCE:
                        version->limited_cce = tlv->val[0];
                        break;
                case INTEL_TLV_SBE_TYPE:
                        version->sbe_type = tlv->val[0];
                        break;
                case INTEL_TLV_OTP_BDADDR:
                        memcpy(&version->otp_bd_addr, tlv->val, tlv->len);
                        break;
                default:
                        /* Ignore rest of information */
                        break;
                }
                /* consume the current tlv and move to next */
                skb_pull(skb, tlv->len + sizeof(*tlv));
        }

        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_version_tlv);
/* ------- REGMAP IBT SUPPORT ------- */

#define IBT_REG_MODE_8BIT  0x00
#define IBT_REG_MODE_16BIT 0x01
#define IBT_REG_MODE_32BIT 0x02
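
/* Context for a regmap region that is backed by a pair of vendor HCI
 * commands: one opcode for register reads and one for register writes.
 */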
struct regmap_ibt_context {
        struct hci_dev *hdev;
        __u16 op_write;
        __u16 op_read;
};

struct ibt_cp_reg_access {
        __le32  addr;
        __u8    mode;
        __u8    len;
        __u8    data[];
} __packed;

struct ibt_rp_reg_access {
        __u8    status;
        __le32  addr;
        __u8    data[];
} __packed;
static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
                           void *val, size_t val_size)
{
        struct regmap_ibt_context *ctx = context;
        struct ibt_cp_reg_access cp;
        struct ibt_rp_reg_access *rp;
        struct sk_buff *skb;
        int err = 0;

        if (reg_size != sizeof(__le32))
                return -EINVAL;

        switch (val_size) {
        case 1:
                cp.mode = IBT_REG_MODE_8BIT;
                break;
        case 2:
                cp.mode = IBT_REG_MODE_16BIT;
                break;
        case 4:
                cp.mode = IBT_REG_MODE_32BIT;
                break;
        default:
                return -EINVAL;
        }

        /* regmap provides a little-endian formatted addr */
        cp.addr = *(__le32 *)addr;
        cp.len = val_size;

        bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));

        skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
                           le32_to_cpu(cp.addr), err);
                return err;
        }

        if (skb->len != sizeof(*rp) + val_size) {
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
                           le32_to_cpu(cp.addr));
                err = -EINVAL;
                goto done;
        }

        rp = (struct ibt_rp_reg_access *)skb->data;

        if (rp->addr != cp.addr) {
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
                           le32_to_cpu(rp->addr));
                err = -EINVAL;
                goto done;
        }

        memcpy(val, rp->data, val_size);

done:
        kfree_skb(skb);
        return err;
}
static int regmap_ibt_gather_write(void *context,
                                   const void *addr, size_t reg_size,
                                   const void *val, size_t val_size)
{
        struct regmap_ibt_context *ctx = context;
        struct ibt_cp_reg_access *cp;
        struct sk_buff *skb;
        int plen = sizeof(*cp) + val_size;
        u8 mode;
        int err = 0;

        if (reg_size != sizeof(__le32))
                return -EINVAL;

        switch (val_size) {
        case 1:
                mode = IBT_REG_MODE_8BIT;
                break;
        case 2:
                mode = IBT_REG_MODE_16BIT;
                break;
        case 4:
                mode = IBT_REG_MODE_32BIT;
                break;
        default:
                return -EINVAL;
        }

        cp = kmalloc(plen, GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        /* regmap provides a little-endian formatted addr/value */
        cp->addr = *(__le32 *)addr;
        cp->mode = mode;
        cp->len = val_size;
        memcpy(&cp->data, val, val_size);

        bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));

        skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
                           le32_to_cpu(cp->addr), err);
                goto done;
        }
        kfree_skb(skb);

done:
        kfree(cp);
        return err;
}
static int regmap_ibt_write(void *context, const void *data, size_t count)
{
        /* data contains register+value, since we only support 32bit addr,
         * minimum data size is 4 bytes.
         */
        if (WARN_ONCE(count < 4, "Invalid register access"))
                return -EINVAL;

        return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
}
static void regmap_ibt_free_context(void *context)
{
        kfree(context);
}
static struct regmap_bus regmap_ibt = {
        .read = regmap_ibt_read,
        .write = regmap_ibt_write,
        .gather_write = regmap_ibt_gather_write,
        .free_context = regmap_ibt_free_context,
        .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
        .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
/* Config is the same for all register regions */
static const struct regmap_config regmap_ibt_cfg = {
        .name      = "btintel_regmap",
        .reg_bits  = 32,
        .val_bits  = 32,
};
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
                                   u16 opcode_write)
{
        struct regmap_ibt_context *ctx;

        bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
                    opcode_write);

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->op_read = opcode_read;
        ctx->op_write = opcode_write;
        ctx->hdev = hdev;

        return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
}
EXPORT_SYMBOL_GPL(btintel_regmap_init);
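
/* Send the Intel Reset vendor command (0xfc01) with boot_option 0x01 so
 * that the controller boots from the supplied boot address.
 */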
int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param)
{
        struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 };
        struct sk_buff *skb;

        params.boot_param = cpu_to_le32(boot_param);

        skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Failed to send Intel Reset command");
                return PTR_ERR(skb);
        }

        kfree_skb(skb);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_send_intel_reset);
int btintel_read_boot_params(struct hci_dev *hdev,
                             struct intel_boot_params *params)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        if (skb->len != sizeof(*params)) {
                bt_dev_err(hdev, "Intel boot parameters size mismatch");
                kfree_skb(skb);
                return -EILSEQ;
        }

        memcpy(params, skb->data, sizeof(*params));

        kfree_skb(skb);

        if (params->status) {
                bt_dev_err(hdev, "Intel boot parameters command failed (%02x)",
                           params->status);
                return -bt_to_errno(params->status);
        }

        bt_dev_info(hdev, "Device revision is %u",
                    le16_to_cpu(params->dev_revid));

        bt_dev_info(hdev, "Secure boot is %s",
                    params->secure_boot ? "enabled" : "disabled");

        bt_dev_info(hdev, "OTP lock is %s",
                    params->otp_lock ? "enabled" : "disabled");

        bt_dev_info(hdev, "API lock is %s",
                    params->api_lock ? "enabled" : "disabled");

        bt_dev_info(hdev, "Debug lock is %s",
                    params->debug_lock ? "enabled" : "disabled");

        bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
                    params->min_fw_build_nn, params->min_fw_build_cw,
                    2000 + params->min_fw_build_yy);

        return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);
static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev,
                                              const struct firmware *fw)
{
        int err;

        /* Start the firmware download transaction with the Init fragment
         * represented by the 128 bytes of CSS header.
         */
        err = btintel_secure_send(hdev, 0x00, 128, fw->data);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
                goto done;
        }

        /* Send the 256 bytes of public key information from the firmware
         * as the PKey fragment.
         */
        err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
                goto done;
        }

        /* Send the 256 bytes of signature information from the firmware
         * as the Sign fragment.
         */
        err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send firmware signature (%d)", err);
                goto done;
        }

done:
        return err;
}
static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
                                                const struct firmware *fw)
{
        int err;

        /* Start the firmware download transaction with the Init fragment
         * represented by the 128 bytes of CSS header.
         */
        err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
                return err;
        }

        /* Send the 96 bytes of public key information from the firmware
         * as the PKey fragment.
         */
        err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
                return err;
        }

        /* Send the 96 bytes of signature information from the firmware
         * as the Sign fragment.
         */
        err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224);
        if (err < 0) {
                bt_dev_err(hdev, "Failed to send firmware signature (%d)",
                           err);
                return err;
        }
        return 0;
}
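
/* Stream the command section of the firmware file as Data fragments via
 * Secure Send, keeping every fragment 4-byte aligned and saving the boot
 * parameter embedded in the HCI_Intel_Reset (0xfc0e) command for later.
 */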
static int btintel_download_firmware_payload(struct hci_dev *hdev,
                                             const struct firmware *fw,
                                             u32 *boot_param, size_t offset)
{
        int err;
        const u8 *fw_ptr;
        u32 frag_len;

        fw_ptr = fw->data + offset;
        frag_len = 0;
        err = -EINVAL;

        while (fw_ptr - fw->data < fw->size) {
                struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);

                /* Each SKU has a different reset parameter to use in the
                 * HCI_Intel_Reset command and it is embedded in the firmware
                 * data. So, instead of using a static value per SKU, check
                 * the firmware data and save it for later use.
                 */
                if (le16_to_cpu(cmd->opcode) == 0xfc0e) {
                        /* The boot parameter is the first 32-bit value
                         * and the remaining 3 octets are reserved.
                         */
                        *boot_param = get_unaligned_le32(fw_ptr + sizeof(*cmd));

                        bt_dev_dbg(hdev, "boot_param=0x%x", *boot_param);
                }

                frag_len += sizeof(*cmd) + cmd->plen;

                /* The parameter length of the secure send command requires
                 * a 4 byte alignment. It happens so that the firmware file
                 * contains proper Intel_NOP commands to align the fragments
                 * as needed.
                 *
                 * Send set of commands with 4 byte alignment from the
                 * firmware data buffer as a single Data fragment.
                 */
                if (!(frag_len % 4)) {
                        err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
                        if (err < 0) {
                                bt_dev_err(hdev,
                                           "Failed to send firmware data (%d)",
                                           err);
                                goto done;
                        }

                        fw_ptr += frag_len;
                        frag_len = 0;
                }
        }

done:
        return err;
}
int btintel_download_firmware(struct hci_dev *hdev,
                              const struct firmware *fw,
                              u32 *boot_param)
{
        int err;

        err = btintel_sfi_rsa_header_secure_send(hdev, fw);
        if (err)
                return err;

        return btintel_download_firmware_payload(hdev, fw, boot_param,
                                                 RSA_HEADER_LEN);
}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
int btintel_download_firmware_newgen(struct hci_dev *hdev,
                                     const struct firmware *fw, u32 *boot_param,
                                     u8 hw_variant, u8 sbe_type)
{
        int err;
        u32 css_header_ver;

        /* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support
         * only the RSA secure boot engine. Hence, the corresponding sfi file
         * will have an RSA header of 644 bytes followed by the Command Buffer.
         *
         * iBT hardware variants 0x17, 0x18 onwards support both RSA and ECDSA
         * secure boot engines. As a result, the corresponding sfi file will
         * have an RSA header of 644 bytes and an ECDSA header of 320 bytes,
         * followed by the Command Buffer.
         *
         * CSS Header byte positions 0x08 to 0x0B represent the CSS Header
         * version: RSA(0x00010000), ECDSA(0x00020000).
         */
        css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET);
        if (css_header_ver != 0x00010000) {
                bt_dev_err(hdev, "Invalid CSS Header version");
                return -EINVAL;
        }

        if (hw_variant <= 0x14) {
                if (sbe_type != 0x00) {
                        bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)",
                                   hw_variant);
                        return -EINVAL;
                }

                err = btintel_sfi_rsa_header_secure_send(hdev, fw);
                if (err)
                        return err;

                err = btintel_download_firmware_payload(hdev, fw, boot_param, RSA_HEADER_LEN);
                if (err)
                        return err;
        } else if (hw_variant >= 0x17) {
                /* Check if CSS header for ECDSA follows the RSA header */
                if (fw->data[ECDSA_OFFSET] != 0x06)
                        return -EINVAL;

                /* Check if the CSS Header version is ECDSA(0x00020000) */
                css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET + CSS_HEADER_OFFSET);
                if (css_header_ver != 0x00020000) {
                        bt_dev_err(hdev, "Invalid CSS Header version");
                        return -EINVAL;
                }

                if (sbe_type == 0x00) {
                        err = btintel_sfi_rsa_header_secure_send(hdev, fw);
                        if (err)
                                return err;

                        err = btintel_download_firmware_payload(hdev, fw,
                                                                boot_param,
                                                                RSA_HEADER_LEN + ECDSA_HEADER_LEN);
                        if (err)
                                return err;
                } else if (sbe_type == 0x01) {
                        err = btintel_sfi_ecdsa_header_secure_send(hdev, fw);
                        if (err)
                                return err;

                        err = btintel_download_firmware_payload(hdev, fw,
                                                                boot_param,
                                                                RSA_HEADER_LEN + ECDSA_HEADER_LEN);
                        if (err)
                                return err;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(btintel_download_firmware_newgen);
void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
        struct intel_reset params;
        struct sk_buff *skb;

        /* Send Intel Reset command. This will result in
         * re-enumeration of the BT controller.
         *
         * Intel Reset parameter description:
         * reset_type :   0x00 (Soft reset),
         *                0x01 (Hard reset)
         * patch_enable : 0x00 (Do not enable),
         *                0x01 (Enable)
         * ddc_reload :   0x00 (Do not reload),
         *                0x01 (Reload)
         * boot_option:   0x00 (Current image),
         *                0x01 (Specified boot address)
         * boot_param:    Boot address
         */
        params.reset_type = 0x01;
        params.patch_enable = 0x01;
        params.ddc_reload = 0x01;
        params.boot_option = 0x00;
        params.boot_param = cpu_to_le32(0x00000000);

        skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params),
                             &params, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "FW download error recovery failed (%ld)",
                           PTR_ERR(skb));
                return;
        }
        bt_dev_info(hdev, "Intel reset sent to retry FW download");
        kfree_skb(skb);

        /* Current Intel BT controllers (ThP/JfP) hold the USB reset
         * lines for 2ms when they receive Intel Reset in bootloader mode.
         * Whereas, the upcoming Intel BT controllers will hold USB reset
         * for 150ms. To keep the delay generic, 150ms is chosen here.
         */
        msleep(150);
}
EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader);
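
/* Read page 1 of the Intel supported-features bit mask via vendor
 * command 0xfca6.
 */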
int btintel_read_debug_features(struct hci_dev *hdev,
                                struct intel_debug_features *features)
{
        struct sk_buff *skb;
        u8 page_no = 1;

        /* Intel controller supports two pages, each page is of 128-bit
         * feature bit mask. And each bit defines specific feature support.
         */
        skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Reading supported features failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        if (skb->len != (sizeof(features->page1) + 3)) {
                bt_dev_err(hdev, "Supported features event size mismatch");
                kfree_skb(skb);
                return -EILSEQ;
        }

        memcpy(features->page1, skb->data + 3, sizeof(features->page1));

        /* Read the supported features page2 if required in future. */
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(btintel_read_debug_features);
int btintel_set_debug_features(struct hci_dev *hdev,
                               const struct intel_debug_features *features)
{
        u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00 };
        struct sk_buff *skb;

        if (!features)
                return -EINVAL;

        if (!(features->page1[0] & 0x3f)) {
                bt_dev_info(hdev, "Telemetry exception format not supported");
                return 0;
        }

        skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
                           PTR_ERR(skb));
                return PTR_ERR(skb);
        }

        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_debug_features);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("intel/ibt-11-5.sfi");
MODULE_FIRMWARE("intel/ibt-11-5.ddc");
MODULE_FIRMWARE("intel/ibt-12-16.sfi");
MODULE_FIRMWARE("intel/ibt-12-16.ddc");