// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bluetooth support for Intel PCIe devices
 *
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>

#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btintel.h"
#include "btintel_pcie.h"

#define VERSION "0.1"

#define BTINTEL_PCI_DEVICE(dev, subdev)	\
	.vendor = PCI_VENDOR_ID_INTEL,	\
	.device = (dev),		\
	.subvendor = PCI_ANY_ID,	\
	.subdevice = (subdev)

#define POLL_INTERVAL_US	10

/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
	{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);

/* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
#define BTINTEL_PCIE_HCI_TYPE_LEN	4
#define BTINTEL_PCIE_HCI_CMD_PKT	0x00000001
#define BTINTEL_PCIE_HCI_ACL_PKT	0x00000002
#define BTINTEL_PCIE_HCI_SCO_PKT	0x00000003
#define BTINTEL_PCIE_HCI_EVT_PKT	0x00000004
#define BTINTEL_PCIE_HCI_ISO_PKT	0x00000005
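/* Note: every HCI packet exchanged with the device carries one of the 32-bit
 * type values above instead of the 1-byte BT SIG type. On the TX path the
 * driver pushes the value in front of the HCI payload (see
 * btintel_pcie_send_frame()); on the RX path the first four bytes of each
 * received frame are read with get_unaligned_le32() and stripped before the
 * payload is handed to the HCI core (see btintel_pcie_recv_frame()).
 */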
/* Alive interrupt context */
enum {
	BTINTEL_PCIE_ROM,
	BTINTEL_PCIE_FW_DL,
	BTINTEL_PCIE_HCI_RESET,
	BTINTEL_PCIE_INTEL_HCI_RESET1,
	BTINTEL_PCIE_INTEL_HCI_RESET2,
	BTINTEL_PCIE_D0,
	BTINTEL_PCIE_D3
};
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
				     u16 queue_num)
{
	bt_dev_dbg(hdev, "IA: %s: tr-h:%02u  tr-t:%02u  cr-h:%02u  cr-t:%02u",
		   queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
		   ia->tr_hia[queue_num], ia->tr_tia[queue_num],
		   ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
}
static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
				   u16 index)
{
	bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
		   index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
}
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
}
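/* entry->entry holds the vector (queue) number, so "entry - queue" walks back
 * to msix_entries[0]; container_of() then recovers the btintel_pcie_data that
 * embeds the msix_entries[] array. This lets each per-vector dev_id passed to
 * the interrupt handlers map back to the driver private data.
 */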
/* Set the doorbell for TXQ to notify the device that @index (actually index-1)
 * of the TFD is updated and ready to transmit.
 */
static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
{
	u32 val;

	val = index;
	val |= (BTINTEL_PCIE_TX_DB_VEC << 16);

	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
}
/* Copy the data to next(@tfd_index) data buffer and update the TFD(transfer
 * descriptor) with the data length and the DMA address of the data buffer.
 */
static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
				    struct sk_buff *skb)
{
	struct data_buf *buf;
	struct tfd *tfd;

	tfd = &txq->tfds[tfd_index];
	memset(tfd, 0, sizeof(*tfd));

	buf = &txq->bufs[tfd_index];

	tfd->size = skb->len;
	tfd->addr = buf->data_p_addr;

	/* Copy the outgoing data to DMA buffer */
	memcpy(buf->data, skb->data, tfd->size);
}
static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
				  struct sk_buff *skb)
{
	int ret;
	u16 tfd_index;
	struct txq *txq = &data->txq;

	tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];

	if (tfd_index > txq->count)
		return -ERANGE;

	/* Prepare for TX. It updates the TFD with the length of data and
	 * address of the DMA buffer, and copy the data to the DMA buffer
	 */
	btintel_pcie_prepare_tx(txq, tfd_index, skb);

	tfd_index = (tfd_index + 1) % txq->count;
	data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;

	/* Arm wait event condition */
	data->tx_wait_done = false;

	/* Set the doorbell to notify the device */
	btintel_pcie_set_tx_db(data, tfd_index);

	/* Wait for the complete interrupt - URBD0 */
	ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
				 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
	if (!ret)
		return -ETIME;

	return 0;
}
/* Set the doorbell for RXQ to notify the device that @index (actually index-1)
 * is available to receive the data
 */
static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
{
	u32 val;

	val = index;
	val |= (BTINTEL_PCIE_RX_DB_VEC << 16);

	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
}
/* Update the FRBD (free buffer descriptor) with the @frbd_index and the
 * DMA address of the free buffer.
 */
static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
{
	struct data_buf *buf;
	struct frbd *frbd;

	/* Get the buffer of the FRBD for DMA */
	buf = &rxq->bufs[frbd_index];

	frbd = &rxq->frbds[frbd_index];
	memset(frbd, 0, sizeof(*frbd));

	frbd->tag = frbd_index;
	frbd->addr = buf->data_p_addr;
}
static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
{
	u16 frbd_index;
	struct rxq *rxq = &data->rxq;

	frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];

	if (frbd_index > rxq->count)
		return -ERANGE;

	/* Prepare for RX submit. It updates the FRBD with the address of DMA
	 * buffer
	 */
	btintel_pcie_prepare_rx(rxq, frbd_index);

	frbd_index = (frbd_index + 1) % rxq->count;
	data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
	ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);

	/* Set the doorbell to notify the device */
	btintel_pcie_set_rx_db(data, frbd_index);

	return 0;
}
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
	int i, ret;

	for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
		ret = btintel_pcie_submit_rx(data);
		if (ret)
			return ret;
	}

	return 0;
}
static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
{
	memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
	memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
	memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
	memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
}
static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
{
	u32 reg;
	int retry = 3;

	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);

	reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
		 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
		 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
	reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;

	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);

	do {
		reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
		if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
			break;
		usleep_range(10000, 12000);
	} while (--retry > 0);
	usleep_range(10000, 12000);

	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);

	reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
		 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
		 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
	reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
	usleep_range(10000, 12000);

	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
	bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);

	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);

	/* If shared hardware reset is success then boot stage register shall be
	 * set to 0
	 */
	return reg == 0 ? 0 : -ENODEV;
}
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
 * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
 * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
 * Then the host reads firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
 */
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{
	int err;
	u32 reg;

	data->gp0_received = false;

	/* Update the DMA address of CI struct to CSR */
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
			      data->ci_p_addr & 0xffffffff);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
			      (u64)data->ci_p_addr >> 32);

	/* Reset the cached value of boot stage. it is updated by the MSI-X
	 * gp0 interrupt handler.
	 */
	data->boot_stage_cache = 0x0;

	/* Set MAC_INIT bit to start primary bootloader */
	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
	reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
		 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
		 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
	reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
		BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);

	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);

	/* MAC is ready. Enable BT FUNC */
	btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
				  BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);

	btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);

	/* wait for interrupt from the device after booting up to primary
	 * bootloader.
	 */
	data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
	err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
				 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
	if (!err)
		return -ETIME;

	/* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
	if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
		return -ENODEV;

	return 0;
}
/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
 * Sometimes during firmware image switching from ROM to IML or IML to OP image,
 * the previous image bit is not cleared by firmware when alive interrupt is
 * received. Driver needs to take care of these sticky bits when deciding the
 * current image running on controller.
 * Ex: 0x10 and 0x11 - both represent that controller is running IML
 */
static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
{
	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
	       !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
	       !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
}
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
{
	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
}

static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
{
	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
	       !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
}

static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
{
	return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
}

static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
{
	return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
}
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
					u32 dxstate)
{
	bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
}
static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
{
	switch (alive_intr_ctxt) {
	case BTINTEL_PCIE_ROM:
		return "rom";
	case BTINTEL_PCIE_FW_DL:
		return "fw_dl";
	case BTINTEL_PCIE_D0:
		return "d0";
	case BTINTEL_PCIE_D3:
		return "d3";
	case BTINTEL_PCIE_HCI_RESET:
		return "hci_reset";
	case BTINTEL_PCIE_INTEL_HCI_RESET1:
		return "intel_reset1";
	case BTINTEL_PCIE_INTEL_HCI_RESET2:
		return "intel_reset2";
	default:
		return "unknown";
	}
}
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
 */
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{
	bool submit_rx, signal_waitq;
	u32 reg, old_ctxt;

	/* This interrupt is for three different causes and it is not easy to
	 * know what causes the interrupt. So, it compares each register value
	 * with the cached value and updates it before waking up the queue.
	 */
	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
	if (reg != data->boot_stage_cache)
		data->boot_stage_cache = reg;

	bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
		   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
		   data->boot_stage_cache, reg);
	reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
	if (reg != data->img_resp_cache)
		data->img_resp_cache = reg;

	data->gp0_received = true;

	old_ctxt = data->alive_intr_ctxt;
	submit_rx = false;
	signal_waitq = false;

	switch (data->alive_intr_ctxt) {
	case BTINTEL_PCIE_ROM:
		data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
		signal_waitq = true;
		break;
	case BTINTEL_PCIE_FW_DL:
		/* Error case is already handled. Ideally control shall not
		 * reach here
		 */
		break;
	case BTINTEL_PCIE_INTEL_HCI_RESET1:
		if (btintel_pcie_in_op(data)) {
			submit_rx = true;
			signal_waitq = true;
			break;
		}

		if (btintel_pcie_in_iml(data)) {
			submit_rx = true;
			data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
			signal_waitq = true;
			break;
		}
		break;
	case BTINTEL_PCIE_INTEL_HCI_RESET2:
		if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
			btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
			data->alive_intr_ctxt = BTINTEL_PCIE_D0;
		}
		break;
	case BTINTEL_PCIE_D0:
		if (btintel_pcie_in_d3(data)) {
			data->alive_intr_ctxt = BTINTEL_PCIE_D3;
			signal_waitq = true;
		}
		break;
	case BTINTEL_PCIE_D3:
		if (btintel_pcie_in_d0(data)) {
			data->alive_intr_ctxt = BTINTEL_PCIE_D0;
			submit_rx = true;
			signal_waitq = true;
		}
		break;
	case BTINTEL_PCIE_HCI_RESET:
		data->alive_intr_ctxt = BTINTEL_PCIE_D0;
		submit_rx = true;
		signal_waitq = true;
		break;
	default:
		bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
			   data->alive_intr_ctxt);
		break;
	}

	if (submit_rx) {
		btintel_pcie_reset_ia(data);
		btintel_pcie_start_rx(data);
	}

	if (signal_waitq) {
		bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
		wake_up(&data->gp0_wait_q);
	}

	if (old_ctxt != data->alive_intr_ctxt)
		bt_dev_dbg(data->hdev, "alive context changed: %s  ->  %s",
			   btintel_pcie_alivectxt_state2str(old_ctxt),
			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
}
/* This function handles the MSI-X interrupt for rx queue 0 which is for TX
 */
static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
{
	u16 cr_tia, cr_hia;
	struct txq *txq;
	struct urbd0 *urbd0;

	cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
	cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];

	if (cr_tia == cr_hia)
		return;

	txq = &data->txq;

	while (cr_tia != cr_hia) {
		data->tx_wait_done = true;
		wake_up(&data->tx_wait_q);

		urbd0 = &txq->urbd0s[cr_tia];

		if (urbd0->tfd_index > txq->count)
			return;

		cr_tia = (cr_tia + 1) % txq->count;
		data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
		ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
	}
}
static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *)skb->data;
	const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
	    hdr->plen > 0) {
		const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
		unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;

		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			switch (skb->data[2]) {
			case 0x02:
				/* When switching to the operational firmware
				 * the device sends a vendor specific event
				 * indicating that the bootup completed.
				 */
				btintel_bootup(hdev, ptr, len);

				/* If bootup event is from operational image,
				 * driver needs to write sleep control register to
				 * move into D0 state
				 */
				if (btintel_pcie_in_op(data)) {
					btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
					data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
					kfree_skb(skb);
					return 0;
				}

				if (btintel_pcie_in_iml(data)) {
					/* In case of IML, there is no concept
					 * of D0 transition. Just mimic as if
					 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
					 * bit and waking up the task waiting on
					 * INTEL_WAIT_FOR_D0. This is required
					 * as intel_boot() is common function for
					 * both IML and OP image loading.
					 */
					if (btintel_test_and_clear_flag(data->hdev,
									INTEL_WAIT_FOR_D0))
						btintel_wake_up_flag(data->hdev,
								     INTEL_WAIT_FOR_D0);
					kfree_skb(skb);
					return 0;
				}
				break;
			case 0x06:
				/* When the firmware loading completes the
				 * device sends out a vendor specific event
				 * indicating the result of the firmware
				 * loading.
				 */
				btintel_secure_send_result(hdev, ptr, len);
				kfree_skb(skb);
				return 0;
			}
		}

		/* Handle all diagnostics events separately. May still call
		 * hci_recv_frame.
		 */
		if (len >= sizeof(diagnostics_hdr) &&
		    memcmp(&skb->data[2], diagnostics_hdr,
			   sizeof(diagnostics_hdr)) == 0) {
			return btintel_diagnostics(hdev, skb);
		}

		/* This is a debug event that comes from IML and OP image when it
		 * starts execution. There is no need to pass this event to the
		 * stack.
		 */
		if (skb->data[2] == 0x97) {
			kfree_skb(skb);
			return 0;
		}
	}

	return hci_recv_frame(hdev, skb);
}
/* Process the received rx data.
 * It checks the frame header to identify the data type, creates a new skb,
 * and hands it to the HCI API.
 */
static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
				   struct sk_buff *skb)
{
	int ret;
	u8 pkt_type;
	u16 plen;
	u32 pcie_pkt_type;
	struct sk_buff *new_skb;
	void *pdata;
	struct hci_dev *hdev = data->hdev;

	spin_lock(&data->hci_rx_lock);

	/* The first 4 bytes indicates the Intel PCIe specific packet type */
	pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
	if (!pdata) {
		bt_dev_err(hdev, "Corrupted packet received");
		ret = -EILSEQ;
		goto exit_error;
	}

	pcie_pkt_type = get_unaligned_le32(pdata);

	switch (pcie_pkt_type) {
	case BTINTEL_PCIE_HCI_ACL_PKT:
		if (skb->len >= HCI_ACL_HDR_SIZE) {
			plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
			pkt_type = HCI_ACLDATA_PKT;
		} else {
			bt_dev_err(hdev, "ACL packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;

	case BTINTEL_PCIE_HCI_SCO_PKT:
		if (skb->len >= HCI_SCO_HDR_SIZE) {
			plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
			pkt_type = HCI_SCODATA_PKT;
		} else {
			bt_dev_err(hdev, "SCO packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;

	case BTINTEL_PCIE_HCI_EVT_PKT:
		if (skb->len >= HCI_EVENT_HDR_SIZE) {
			plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
			pkt_type = HCI_EVENT_PKT;
		} else {
			bt_dev_err(hdev, "Event packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;

	case BTINTEL_PCIE_HCI_ISO_PKT:
		if (skb->len >= HCI_ISO_HDR_SIZE) {
			plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
			pkt_type = HCI_ISODATA_PKT;
		} else {
			bt_dev_err(hdev, "ISO packet is too short");
			ret = -EILSEQ;
			goto exit_error;
		}
		break;

	default:
		bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
			   pcie_pkt_type);
		ret = -EINVAL;
		goto exit_error;
	}

	if (skb->len < plen) {
		bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
			   pkt_type);
		ret = -EILSEQ;
		goto exit_error;
	}

	bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);

	new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
	if (!new_skb) {
		bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
			   skb->len);
		ret = -ENOMEM;
		goto exit_error;
	}

	hci_skb_pkt_type(new_skb) = pkt_type;
	skb_put_data(new_skb, skb->data, plen);
	hdev->stat.byte_rx += plen;

	if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
		ret = btintel_pcie_recv_event(hdev, new_skb);
	else
		ret = hci_recv_frame(hdev, new_skb);

exit_error:
	if (ret)
		hdev->stat.err_rx++;

	spin_unlock(&data->hci_rx_lock);

	return ret;
}
static void btintel_pcie_rx_work(struct work_struct *work)
{
	struct btintel_pcie_data *data = container_of(work,
					struct btintel_pcie_data, rx_work);
	struct sk_buff *skb;
	int err;
	struct hci_dev *hdev = data->hdev;

	/* Process the sk_buf in queue and send to the HCI layer */
	while ((skb = skb_dequeue(&data->rx_skb_q))) {
		err = btintel_pcie_recv_frame(data, skb);
		if (err)
			bt_dev_err(hdev, "Failed to send received frame: %d",
				   err);
	}
}
/* create sk_buff with data and save it to queue and start RX work */
static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
				       void *buf)
{
	int ret, len;
	struct rfh_hdr *rfh_hdr;
	struct sk_buff *skb;

	rfh_hdr = buf;

	len = rfh_hdr->packet_len;
	if (!len) {
		ret = -EINVAL;
		goto resubmit;
	}

	/* Remove RFH header */
	buf += sizeof(*rfh_hdr);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		goto resubmit;

	skb_put_data(skb, buf, len);
	skb_queue_tail(&data->rx_skb_q, skb);
	queue_work(data->workqueue, &data->rx_work);

resubmit:
	ret = btintel_pcie_submit_rx(data);

	return ret;
}
/* Handles the MSI-X interrupt for rx queue 1 which is for RX */
static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
{
	u16 cr_hia, cr_tia;
	struct rxq *rxq;
	struct urbd1 *urbd1;
	struct data_buf *buf;
	int ret;
	struct hci_dev *hdev = data->hdev;

	cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
	cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];

	bt_dev_dbg(hdev, "RXQ: cr_hia: %u  cr_tia: %u", cr_hia, cr_tia);

	/* Check CR_TIA and CR_HIA for change */
	if (cr_tia == cr_hia) {
		bt_dev_warn(hdev, "RXQ: no new CD found");
		return;
	}

	rxq = &data->rxq;

	/* The firmware sends multiple CD in a single MSI-X and it needs to
	 * process all received CDs in this interrupt.
	 */
	while (cr_tia != cr_hia) {
		urbd1 = &rxq->urbd1s[cr_tia];
		ipc_print_urbd1(data->hdev, urbd1, cr_tia);

		buf = &rxq->bufs[urbd1->frbd_tag];
		if (!buf) {
			bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
				   urbd1->frbd_tag);
			return;
		}

		ret = btintel_pcie_submit_rx_work(data, urbd1->status,
						  buf->data);
		if (ret) {
			bt_dev_err(hdev, "RXQ: failed to submit rx request");
			return;
		}

		cr_tia = (cr_tia + 1) % rxq->count;
		data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
		ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
	}
}
static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
	u32 intr_fh, intr_hw;

	spin_lock(&data->irq_lock);
	intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
	intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);

	/* Clear the cause registers to avoid handling the same cause again */
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
	spin_unlock(&data->irq_lock);

	if (unlikely(!(intr_fh | intr_hw))) {
		/* Ignore interrupt, inta == 0 */
		return IRQ_NONE;
	}

	/* This interrupt is triggered by the firmware after updating
	 * boot_stage register and image_response register
	 */
	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
		btintel_pcie_msix_gp0_handler(data);

	/* For TX */
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
		btintel_pcie_msix_tx_handle(data);

	/* For RX */
	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
		btintel_pcie_msix_rx_handle(data);

	/*
	 * Before sending the interrupt the HW disables it to prevent a nested
	 * interrupt. This is done by writing 1 to the corresponding bit in
	 * the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as write 1
	 * clear (W1C) register, meaning that it's cleared by writing 1
	 * to the bit.
	 */
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
			      BIT(entry->entry));

	return IRQ_HANDLED;
}
/* This function requests the irq for MSI-X and registers the handlers per irq.
 * Currently, it requests only 1 irq for all interrupt causes.
 */
static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
{
	int err;
	int num_irqs, i;

	for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
		data->msix_entries[i].entry = i;

	num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
					 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
	if (num_irqs < 0)
		return num_irqs;

	data->alloc_vecs = num_irqs;
	data->msix_enabled = 1;

	/* setup irq handler */
	for (i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		msix_entry->vector = pci_irq_vector(data->pdev, i);

		err = devm_request_threaded_irq(&data->pdev->dev,
						msix_entry->vector,
						btintel_pcie_msix_isr,
						btintel_pcie_irq_msix_handler,
						IRQF_SHARED,
						KBUILD_MODNAME,
						msix_entry);
		if (err) {
			pci_free_irq_vectors(data->pdev);
			data->alloc_vecs = 0;
			return err;
		}
	}
	return 0;
}
struct btintel_pcie_causes_list {
	u32 cause;
	u32 mask_reg;
	u8 cause_num;
};

static struct btintel_pcie_causes_list causes_list[] = {
	{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0,	BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK,	0x00 },
	{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1,	BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK,	0x01 },
	{ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0,	BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK,	0x20 },
};
/* This function configures the interrupt masks for both HW_INT_CAUSES and
 * FH_INT_CAUSES which are meaningful to us.
 *
 * After resetting BT function via PCIE FLR or FUNC_CTRL reset, the driver
 * needs to call this function again to configure them, since the masks
 * are reset to 0xFFFFFFFF after reset.
 */
static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
{
	int i;
	int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;

	/* Set Non Auto Clear Cause */
	for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
		btintel_pcie_wr_reg8(data,
				     BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
				     val);
		btintel_pcie_clr_reg_bits(data,
					  causes_list[i].mask_reg,
					  causes_list[i].cause);
	}

	/* Save the initial interrupt mask */
	data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
	data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
}
static int btintel_pcie_config_pcie(struct pci_dev *pdev,
				    struct btintel_pcie_data *data)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;
	}

	data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
	if (IS_ERR(data->base_addr))
		return PTR_ERR(data->base_addr);

	err = btintel_pcie_setup_irq(data);
	if (err)
		return err;

	/* Configure MSI-X with causes list */
	btintel_pcie_config_msix(data);

	return 0;
}
static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
				 struct ctx_info *ci)
{
	ci->size = sizeof(*ci);
	ci->addr_cr_hia = data->ia.cr_hia_p_addr;
	ci->addr_tr_tia = data->ia.tr_tia_p_addr;
	ci->addr_cr_tia = data->ia.cr_tia_p_addr;
	ci->addr_tr_hia = data->ia.tr_hia_p_addr;
	ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
	ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
	ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
	ci->addr_tfdq = data->txq.tfds_p_addr;
	ci->num_tfdq = data->txq.count;
	ci->num_urbdq0 = data->txq.count;
	ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
	ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
	ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
	ci->addr_frbdq = data->rxq.frbds_p_addr;
	ci->num_frbdq = data->rxq.count;
	ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
	ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
	ci->num_urbdq1 = data->rxq.count;
	ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
}
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
				       struct txq *txq)
{
	/* Free data buffers first */
	dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
			  txq->buf_v_addr, txq->buf_p_addr);
	kfree(txq->bufs);
}
static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
				       struct txq *txq)
{
	int i;
	struct data_buf *buf;

	/* Allocate the same number of buffers as the descriptor */
	txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	/* Allocate full chunk of data buffer for DMA first and do indexing and
	 * initialization next, so it can be freed easily
	 */
	txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
					     txq->count * BTINTEL_PCIE_BUFFER_SIZE,
					     &txq->buf_p_addr,
					     GFP_KERNEL | __GFP_NOWARN);
	if (!txq->buf_v_addr) {
		kfree(txq->bufs);
		return -ENOMEM;
	}

	/* Setup the allocated DMA buffer to bufs. Each data_buf should
	 * have virtual address and physical address
	 */
	for (i = 0; i < txq->count; i++) {
		buf = &txq->bufs[i];
		buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
		buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
	}

	return 0;
}
static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
				       struct rxq *rxq)
{
	/* Free data buffers first */
	dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
			  rxq->buf_v_addr, rxq->buf_p_addr);
	kfree(rxq->bufs);
}
static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
				       struct rxq *rxq)
{
	int i;
	struct data_buf *buf;

	/* Allocate the same number of buffers as the descriptor */
	rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	/* Allocate full chunk of data buffer for DMA first and do indexing and
	 * initialization next, so it can be freed easily
	 */
	rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
					     rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
					     &rxq->buf_p_addr,
					     GFP_KERNEL | __GFP_NOWARN);
	if (!rxq->buf_v_addr) {
		kfree(rxq->bufs);
		return -ENOMEM;
	}

	/* Setup the allocated DMA buffer to bufs. Each data_buf should
	 * have virtual address and physical address
	 */
	for (i = 0; i < rxq->count; i++) {
		buf = &rxq->bufs[i];
		buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
		buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
	}

	return 0;
}
static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
				  dma_addr_t p_addr, void *v_addr,
				  struct ia *ia)
{
	/* TR Head Index Array */
	ia->tr_hia_p_addr = p_addr;
	ia->tr_hia = v_addr;

	/* TR Tail Index Array */
	ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
	ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;

	/* CR Head index Array */
	ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
	ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);

	/* CR Tail Index Array */
	ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
	ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
}
static void btintel_pcie_free(struct btintel_pcie_data *data)
{
	btintel_pcie_free_rxq_bufs(data, &data->rxq);
	btintel_pcie_free_txq_bufs(data, &data->txq);

	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
}
/* Allocate tx and rx queues, any related data structures and buffers.
 */
static int btintel_pcie_alloc(struct btintel_pcie_data *data)
{
	int err = 0;
	size_t total;
	dma_addr_t p_addr;
	void *v_addr;

	/* Allocate the chunk of DMA memory for descriptors, index array, and
	 * context information, instead of allocating individually.
	 * The DMA memory for data buffer is allocated while setting up the
	 * each queue.
	 *
	 * Total size is sum of the following
	 *  + size of TFD * Number of descriptors in queue
	 *  + size of URBD0 * Number of descriptors in queue
	 *  + size of FRBD * Number of descriptors in queue
	 *  + size of URBD1 * Number of descriptors in queue
	 *  + size of index * Number of queues(2) * type of index array(4)
	 *  + size of context information
	 */
	total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
		+ sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;

	/* Add the sum of size of index array and size of ci struct */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);

	/* Allocate DMA Pool */
	data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
					 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
	if (!data->dma_pool) {
		err = -ENOMEM;
		goto exit_error;
	}

	v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
				 &p_addr);
	if (!v_addr) {
		dma_pool_destroy(data->dma_pool);
		err = -ENOMEM;
		goto exit_error;
	}

	data->dma_p_addr = p_addr;
	data->dma_v_addr = v_addr;

	/* Setup descriptor count */
	data->txq.count = BTINTEL_DESCS_COUNT;
	data->rxq.count = BTINTEL_DESCS_COUNT;

	/* Setup TFDs */
	data->txq.tfds_p_addr = p_addr;
	data->txq.tfds = v_addr;

	p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);

	/* Setup URBD0s */
	data->txq.urbd0s_p_addr = p_addr;
	data->txq.urbd0s = v_addr;

	p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);

	/* Setup FRBDs */
	data->rxq.frbds_p_addr = p_addr;
	data->rxq.frbds = v_addr;

	p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);

	/* Setup URBD1s */
	data->rxq.urbd1s_p_addr = p_addr;
	data->rxq.urbd1s = v_addr;

	p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
	v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);

	/* Setup data buffers for txq */
	err = btintel_pcie_setup_txq_bufs(data, &data->txq);
	if (err)
		goto exit_error_pool;

	/* Setup data buffers for rxq */
	err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
	if (err)
		goto exit_error_txq;

	/* Setup Index Array */
	btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);

	/* Setup Context Information */
	p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
	v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;

	data->ci = v_addr;
	data->ci_p_addr = p_addr;

	/* Initialize the CI */
	btintel_pcie_init_ci(data, data->ci);

	return 0;

exit_error_txq:
	btintel_pcie_free_txq_bufs(data, &data->txq);
exit_error_pool:
	dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
	dma_pool_destroy(data->dma_pool);
exit_error:
	return err;
}
static int btintel_pcie_open(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}

static int btintel_pcie_close(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "");

	return 0;
}
static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	*(u8 *)skb_put(skb, 1) = 0x00;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
static int btintel_pcie_send_frame(struct hci_dev *hdev,
				   struct sk_buff *skb)
{
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);
	struct hci_command_hdr *cmd;
	__u16 opcode = ~0;
	int ret;
	u32 type;
	u32 old_ctxt;

	/* Due to the fw limitation, the type header of the packet should be
	 * 4 bytes unlike 1 byte for UART. In UART, the firmware can read
	 * the first byte to get the packet type and redirect the rest of data
	 * packet to the right handler.
	 *
	 * But for PCIe, THF(Transfer Flow Handler) fetches the 4 bytes of data
	 * from DMA memory and by the time it reads the first 4 bytes, it has
	 * already consumed some part of packet. Thus the packet type indicator
	 * for iBT PCIe is 4 bytes.
	 *
	 * Luckily, when HCI core creates the skb, it allocates 8 bytes of
	 * head room for profile and driver use, and before sending the data
	 * to the device, append the iBT PCIe packet type in the front.
	 */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		type = BTINTEL_PCIE_HCI_CMD_PKT;
		cmd = (void *)skb->data;
		opcode = le16_to_cpu(cmd->opcode);
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			struct hci_command_hdr *cmd = (void *)skb->data;
			__u16 opcode = le16_to_cpu(cmd->opcode);

			/* When the 0xfc01 command is issued to boot into
			 * the operational firmware, it will actually not
			 * send a command complete event. To keep the flow
			 * control working inject that event here.
			 */
			if (opcode == 0xfc01)
				btintel_pcie_inject_cmd_complete(hdev, opcode);
		}
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		type = BTINTEL_PCIE_HCI_ACL_PKT;
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		type = BTINTEL_PCIE_HCI_SCO_PKT;
		hdev->stat.sco_tx++;
		break;
	case HCI_ISODATA_PKT:
		type = BTINTEL_PCIE_HCI_ISO_PKT;
		break;
	default:
		bt_dev_err(hdev, "Unknown HCI packet type");
		return -EILSEQ;
	}
	memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
	       BTINTEL_PCIE_HCI_TYPE_LEN);

	ret = btintel_pcie_send_sync(data, skb);
	if (ret) {
		hdev->stat.err_tx++;
		bt_dev_err(hdev, "Failed to send frame (%d)", ret);
		goto exit_error;
	}

	if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
	    (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
		old_ctxt = data->alive_intr_ctxt;
		data->alive_intr_ctxt =
			(opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
				BTINTEL_PCIE_HCI_RESET);
		bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s  ->  %s",
			   opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
			   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
		if (opcode == HCI_OP_RESET) {
			data->gp0_received = false;
			ret = wait_event_timeout(data->gp0_wait_q,
						 data->gp0_received,
						 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
			if (!ret) {
				hdev->stat.err_tx++;
				bt_dev_err(hdev, "No alive interrupt received for %s",
					   btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
				ret = -ETIME;
				goto exit_error;
			}
		}
	}
	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);

exit_error:
	return ret;
}
static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
{
	struct hci_dev *hdev;

	hdev = data->hdev;
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
	data->hdev = NULL;
}
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
	const u8 param[1] = { 0xFF };
	struct intel_version_tlv ver_tlv;
	struct sk_buff *skb;
	int err;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* Check the status */
	if (skb->data[0]) {
		bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
			   skb->data[0]);
		err = -EIO;
		goto exit_error;
	}

	/* Apply the common HCI quirks for Intel device */
	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
	set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);

	/* Set up the quality report callback for Intel devices */
	hdev->set_quality_report = btintel_set_quality_report;

	memset(&ver_tlv, 0, sizeof(ver_tlv));
	/* For TLV type device, parse the tlv data */
	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
	if (err) {
		bt_dev_err(hdev, "Failed to parse TLV version information");
		goto exit_error;
	}

	switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
	case 0x37:
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
			   INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	/* Check for supported iBT hardware variants of this firmware
	 * loading method.
	 *
	 * This check has been put in place to ensure correct forward
	 * compatibility options when newer hardware variants come
	 * along.
	 */
	switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
	case 0x1e:	/* BzrI */
		/* Display version information of TLV type */
		btintel_version_info_tlv(hdev, &ver_tlv);

		/* Apply the device specific HCI quirks for TLV based devices
		 *
		 * All TLV based devices support WBS
		 */
		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

		/* Setup MSFT Extension support */
		btintel_set_msft_opcode(hdev,
					INTEL_HW_VARIANT(ver_tlv.cnvi_bt));

		err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
		if (err)
			goto exit_error;
		break;
	default:
		bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
			   INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
		err = -EINVAL;
		goto exit_error;
	}

	btintel_print_fseq_info(hdev);

exit_error:
	kfree_skb(skb);

	return err;
}
static int btintel_pcie_setup(struct hci_dev *hdev)
{
	int err, fw_dl_retry = 0;
	struct btintel_pcie_data *data = hci_get_drvdata(hdev);

	while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
		bt_dev_err(hdev, "Firmware download retry count: %d",
			   fw_dl_retry);
		err = btintel_pcie_reset_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to do shr reset: %d", err);
			break;
		}
		usleep_range(10000, 12000);
		btintel_pcie_reset_ia(data);
		btintel_pcie_config_msix(data);
		err = btintel_pcie_enable_bt(data);
		if (err) {
			bt_dev_err(hdev, "Failed to enable hardware: %d", err);
			break;
		}
		btintel_pcie_start_rx(data);
	}
	return err;
}
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
	int err;
	struct hci_dev *hdev;

	hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_PCI;
	hci_set_drvdata(hdev, data);

	data->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &data->pdev->dev);

	hdev->manufacturer = 2;
	hdev->open = btintel_pcie_open;
	hdev->close = btintel_pcie_close;
	hdev->send = btintel_pcie_send_frame;
	hdev->setup = btintel_pcie_setup;
	hdev->shutdown = btintel_shutdown_combined;
	hdev->hw_error = btintel_hw_error;
	hdev->set_diag = btintel_set_diag;
	hdev->set_bdaddr = btintel_set_bdaddr;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Failed to register to hdev (%d)", err);
		goto exit_error;
	}

	return 0;

exit_error:
	hci_free_dev(hdev);
	return err;
}
static int btintel_pcie_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int err;
	struct btintel_pcie_data *data;

	if (!pdev)
		return -ENODEV;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->pdev = pdev;

	spin_lock_init(&data->irq_lock);
	spin_lock_init(&data->hci_rx_lock);

	init_waitqueue_head(&data->gp0_wait_q);
	data->gp0_received = false;

	init_waitqueue_head(&data->tx_wait_q);
	data->tx_wait_done = false;

	data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
	if (!data->workqueue)
		return -ENOMEM;

	skb_queue_head_init(&data->rx_skb_q);
	INIT_WORK(&data->rx_work, btintel_pcie_rx_work);

	data->boot_stage_cache = 0x00;
	data->img_resp_cache = 0x00;

	err = btintel_pcie_config_pcie(pdev, data);
	if (err)
		goto exit_error;

	pci_set_drvdata(pdev, data);

	err = btintel_pcie_alloc(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_enable_bt(data);
	if (err)
		goto exit_error;

	/* CNV information (CNVi and CNVr) is in CSR */
	data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);

	data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);

	err = btintel_pcie_start_rx(data);
	if (err)
		goto exit_error;

	err = btintel_pcie_setup_hdev(data);
	if (err)
		goto exit_error;

	bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
		   data->cnvr);
	return 0;

exit_error:
	/* reset device before exit */
	btintel_pcie_reset_bt(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);

	return err;
}
static void btintel_pcie_remove(struct pci_dev *pdev)
{
	struct btintel_pcie_data *data;

	data = pci_get_drvdata(pdev);

	btintel_pcie_reset_bt(data);
	for (int i = 0; i < data->alloc_vecs; i++) {
		struct msix_entry *msix_entry;

		msix_entry = &data->msix_entries[i];
		free_irq(msix_entry->vector, msix_entry);
	}

	pci_free_irq_vectors(pdev);

	btintel_pcie_release_hdev(data);

	flush_work(&data->rx_work);

	destroy_workqueue(data->workqueue);

	btintel_pcie_free(data);

	pci_clear_master(pdev);

	pci_set_drvdata(pdev, NULL);
}
static struct pci_driver btintel_pcie_driver = {
	.name = KBUILD_MODNAME,
	.id_table = btintel_pcie_table,
	.probe = btintel_pcie_probe,
	.remove = btintel_pcie_remove,
};
module_pci_driver(btintel_pcie_driver);

MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");