/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};
static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
	},
};
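/* The table above configures the host-side CE rings only. Note that CE4
 * (host->target HTT) sets CE_ATTR_DIS_INTR: its send completions are not
 * interrupt driven but are polled via ath10k_pci_hif_send_complete_check(),
 * which is why ath10k_pci_hif_map_service_to_pipe() reports that pipe as
 * "ul_is_polled". */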
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipedir = PIPEDIR_IN,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE2: target->host WMI */
	{
		.pipedir = PIPEDIR_IN,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE3: host->target WMI */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE4: host->target HTT */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipedir = PIPEDIR_INOUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE7 used only by Host */
};
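/* Unlike host_ce_config_wlan, the table above describes the target's view
 * of the same pipes. It is written into target memory by
 * ath10k_pci_init_config() (together with target_service_to_ce_map_wlan
 * below) before the firmware is told to proceed with initialization. */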
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}
static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
	 * cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}
static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				 PCIE_INTR_ENABLE_ADDRESS);
}
static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;
}
static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
	 * first interrupt from the irq vector is triggered in all cases for
	 * FW indication/errors */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret) {
		ath10k_warn("failed to request early irq: %d\n", ret);
		return ret;
	}

	return 0;
}
static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any given time.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}
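/* Summary of the diagnostic read path above: the caller's target address is
 * translated into CE address space, the reserved diagnostic CE copies the
 * data into a DMA-coherent bounce buffer in DIAG_TRANSFER_LIMIT sized
 * chunks, and the result is byte-swapped from little endian into the
 * caller's buffer. ath10k_pci_diag_write_mem() below mirrors this flow in
 * the opposite direction. */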
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}
/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}
int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -EIO;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}
void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
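/* ath10k_do_pci_wake()/ath10k_do_pci_sleep() are reference counted via
 * keep_awake_count: the SOC_WAKE register is only touched on the 0->1 and
 * 1->0 transitions, so nested callers simply pair the two calls, e.g.:
 *
 *	ret = ath10k_do_pci_wake(ar);
 *	if (ret == 0) {
 *		... MMIO access while the target is awake ...
 *		ath10k_do_pci_sleep(ar);
 *	}
 */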
/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}
/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	unsigned int len;
	u32 flags = 0;
	int ret;

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
			     flags);
	if (ret)
		ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

	return ret;
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries)
			completions += attr->src_nentries;

		if (attr->dest_nentries)
			completions += attr->dest_nentries;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_cleanup_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
}
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}
static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
					    compl->pipe_info->pipe_num, ret);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}
static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_alloc_compl(ar);
	if (ret) {
		ath10k_warn("failed to allocate CE completions: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irqs: %d\n", ret);
		goto err_free_compl;
	}

	ret = ath10k_pci_setup_ce_irq(ar);
	if (ret) {
		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
		goto err_stop;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		goto err_stop;
	}

	ar_pci->started = 1;
	return 0;

err_stop:
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_stop_ce(ar);
	ath10k_pci_process_ce(ar);
err_free_compl:
	ath10k_pci_cleanup_ce(ar);
err_early_irq:
	/* Though there should be no interrupts (device was reset)
	 * power_down() expects the early IRQ to be installed as per the
	 * driver lifecycle. */
	ret_early = ath10k_pci_request_early_irq(ar);
	if (ret_early)
		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

	return ret;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/*
		 * Indicate the completion to higher layers to free
		 * the buffer.
		 */
		if (!netbuf) {
			ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
				    ce_hdl->id);
			continue;
		}

		ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_stop_ce(ar);

	ret = ath10k_pci_request_early_irq(ar);
	if (ret)
		ath10k_warn("failed to re-enable early irq: %d\n", ret);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_device_reset(ar);

	ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (completion_done(&xfer->done))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}
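/* BMI exchanges are fully synchronous: ath10k_pci_hif_exchange_bmi_msg()
 * enqueues an optional response buffer, sends the request, and then waits in
 * ath10k_pci_bmi_wait(), which polls the send and receive completions by
 * calling the two helpers above directly rather than relying on CE
 * interrupts, since BMI runs before the normal interrupt path is set up. */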
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VO, PIPEDIR_OUT, 3, },	/* out = UL = host -> target */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VO, PIPEDIR_IN, 2, },	/* in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BK, PIPEDIR_OUT, 3, },	/* out = UL = host -> target */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BK, PIPEDIR_IN, 2, },	/* in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BE, PIPEDIR_OUT, 3, },	/* out = UL = host -> target */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_BE, PIPEDIR_IN, 2, },	/* in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VI, PIPEDIR_OUT, 3, },	/* out = UL = host -> target */
	{ ATH10K_HTC_SVC_ID_WMI_DATA_VI, PIPEDIR_IN, 2, },	/* in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_WMI_CONTROL, PIPEDIR_OUT, 3, },	/* out = UL = host -> target */
	{ ATH10K_HTC_SVC_ID_WMI_CONTROL, PIPEDIR_IN, 2, },	/* in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_RSVD_CTRL, PIPEDIR_OUT, 0, },	/* could be moved to 3 (share with WMI) */
	{ ATH10K_HTC_SVC_ID_RSVD_CTRL, PIPEDIR_IN, 1, },	/* in = DL = target -> host */
	{ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, PIPEDIR_OUT, 0, }, /* not currently used */
	{ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, PIPEDIR_IN, 1, },  /* not currently used */
	{ ATH10K_HTC_SVC_ID_HTT_DATA_MSG, PIPEDIR_OUT, 4, },	/* out = UL = host -> target */
	{ ATH10K_HTC_SVC_ID_HTT_DATA_MSG, PIPEDIR_IN, 1, },	/* in = DL = target -> host */

	/* (Additions here) */

	{ 0, 0, 0, },						/* Must be last */
};
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS, core_ctrl);
	if (ret) {
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}
*ar
)
1789 u32 interconnect_targ_addr
;
1790 u32 pcie_state_targ_addr
= 0;
1791 u32 pipe_cfg_targ_addr
= 0;
1792 u32 svc_to_pipe_map
= 0;
1793 u32 pcie_config_flags
= 0;
1795 u32 ealloc_targ_addr
;
1797 u32 flag2_targ_addr
;
1800 /* Download to Target the CE Config and the service-to-CE map */
1801 interconnect_targ_addr
=
1802 host_interest_item_address(HI_ITEM(hi_interconnect_state
));
1804 /* Supply Target-side CE configuration */
1805 ret
= ath10k_pci_diag_read_access(ar
, interconnect_targ_addr
,
1806 &pcie_state_targ_addr
);
1808 ath10k_err("Failed to get pcie state addr: %d\n", ret
);
1812 if (pcie_state_targ_addr
== 0) {
1814 ath10k_err("Invalid pcie state addr\n");
1818 ret
= ath10k_pci_diag_read_access(ar
, pcie_state_targ_addr
+
1819 offsetof(struct pcie_state
,
1821 &pipe_cfg_targ_addr
);
1823 ath10k_err("Failed to get pipe cfg addr: %d\n", ret
);
1827 if (pipe_cfg_targ_addr
== 0) {
1829 ath10k_err("Invalid pipe cfg addr\n");
1833 ret
= ath10k_pci_diag_write_mem(ar
, pipe_cfg_targ_addr
,
1834 target_ce_config_wlan
,
1835 sizeof(target_ce_config_wlan
));
1838 ath10k_err("Failed to write pipe cfg: %d\n", ret
);
1842 ret
= ath10k_pci_diag_read_access(ar
, pcie_state_targ_addr
+
1843 offsetof(struct pcie_state
,
1847 ath10k_err("Failed to get svc/pipe map: %d\n", ret
);
1851 if (svc_to_pipe_map
== 0) {
1853 ath10k_err("Invalid svc_to_pipe map\n");
1857 ret
= ath10k_pci_diag_write_mem(ar
, svc_to_pipe_map
,
1858 target_service_to_ce_map_wlan
,
1859 sizeof(target_service_to_ce_map_wlan
));
1861 ath10k_err("Failed to write svc/pipe map: %d\n", ret
);
1865 ret
= ath10k_pci_diag_read_access(ar
, pcie_state_targ_addr
+
1866 offsetof(struct pcie_state
,
1868 &pcie_config_flags
);
1870 ath10k_err("Failed to get pcie config_flags: %d\n", ret
);
1874 pcie_config_flags
&= ~PCIE_CONFIG_FLAG_ENABLE_L1
;
1876 ret
= ath10k_pci_diag_write_mem(ar
, pcie_state_targ_addr
+
1877 offsetof(struct pcie_state
, config_flags
),
1879 sizeof(pcie_config_flags
));
1881 ath10k_err("Failed to write pcie config_flags: %d\n", ret
);
1885 /* configure early allocation */
1886 ealloc_targ_addr
= host_interest_item_address(HI_ITEM(hi_early_alloc
));
1888 ret
= ath10k_pci_diag_read_access(ar
, ealloc_targ_addr
, &ealloc_value
);
1890 ath10k_err("Faile to get early alloc val: %d\n", ret
);
1894 /* first bank is switched to IRAM */
1895 ealloc_value
|= ((HI_EARLY_ALLOC_MAGIC
<< HI_EARLY_ALLOC_MAGIC_SHIFT
) &
1896 HI_EARLY_ALLOC_MAGIC_MASK
);
1897 ealloc_value
|= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT
) &
1898 HI_EARLY_ALLOC_IRAM_BANKS_MASK
);
1900 ret
= ath10k_pci_diag_write_access(ar
, ealloc_targ_addr
, ealloc_value
);
1902 ath10k_err("Failed to set early alloc val: %d\n", ret
);
1906 /* Tell Target to proceed with initialization */
1907 flag2_targ_addr
= host_interest_item_address(HI_ITEM(hi_option_flag2
));
1909 ret
= ath10k_pci_diag_read_access(ar
, flag2_targ_addr
, &flag2_value
);
1911 ath10k_err("Failed to get option val: %d\n", ret
);
1915 flag2_value
|= HI_OPTION_EARLY_CFG_DONE
;
1917 ret
= ath10k_pci_diag_write_access(ar
, flag2_targ_addr
, flag2_value
);
1919 ath10k_err("Failed to set option val: %d\n", ret
);
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("failed to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	return 0;
}
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_device_reset(ar);
	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s\n", irq_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_device_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_device_reset(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}
#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);

		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head		= ath10k_pci_hif_send_head,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}
static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}
static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);

	return IRQ_HANDLED;
}
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
}
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
		if (ret == 0)
			return 0;
		if (ret > 0)
			pci_disable_msi(ar_pci->pdev);

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
}
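/* IRQ setup therefore degrades gracefully: MSI-X (MSI_NUM_REQUEST vectors)
 * is tried first when the hardware advertises it, then a single MSI, and
 * finally shared legacy interrupts, in which case the interrupt sources are
 * unmasked directly through PCIE_INTR_ENABLE_ADDRESS. */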
static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
}
static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
}
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("target stalled\n");
		ret = -EIO;
	}

	ath10k_pci_sleep(ar);
	return ret;
}
static int ath10k_pci_device_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n",
			   ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_do_pci_sleep(ar);
	return 0;
}
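/* The cold reset above is driven entirely through register accesses: a
 * reset bit in SOC_GLOBAL_RESET_ADDRESS is asserted until RTC_STATE reports
 * COLD_RESET, then released again until the target leaves the cold reset
 * state, with ATH_PCI_RESET_WAIT_MAX iterations bounding each wait. */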
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("Failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};
static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);