/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21 #include <linux/interrupt.h>
/* Number of 32-bit registers captured in a firmware crash dump */
#define REG_DUMP_COUNT_QCA988X 60

/*
 * Maximum number of bytes that can be handled atomically by
 * DiagRead/DiagWrite.
 *
 * NOTE(review): this macro was defined twice with the same value in the
 * original; the redundant second definition has been removed.
 */
#define DIAG_TRANSFER_LIMIT 2048
/* NOTE(review): orphan member line — the opening of the enclosing struct
 * is not visible in this chunk (it appears to have been lost in
 * extraction); restore the surrounding declaration from the original
 * header before compiling. */
struct completion done;
46 struct ath10k_pci_compl
{
47 struct list_head list
;
49 struct ce_state
*ce_state
;
50 struct hif_ce_pipe_info
*pipe_info
;
51 void *transfer_context
;
53 unsigned int transfer_id
;
57 /* compl_state.send_or_recv */
58 #define HIF_CE_COMPLETE_FREE 0
59 #define HIF_CE_COMPLETE_SEND 1
60 #define HIF_CE_COMPLETE_RECV 2
/*
 * PCI-specific Target state
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 *
 * Much of this may be of interest to the Host so
 * HOST_INTEREST->hi_interconnect_state points here
 * (and all members are 32-bit quantities in order to
 * facilitate Host access). In particular, Host software is
 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
 *
 * NOTE(review): the struct declaration line and most of the u32 member
 * declarations were lost in extraction — only the member comments and
 * power_mgmt_method survive below.  Restore the missing declarations
 * from the original header before compiling.
 */

/* Pipe configuration Target address */
/* NB: ce_pipe_config[CE_COUNT] */

/* Service to pipe map Target address */
/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */

/* number of MSI interrupts requested */

/* number of MSI interrupts granted */

/* Message Signalled Interrupt address */

/*
 * Data for firmware interrupt;
 * MSI data for other interrupts are
 * in various SoC registers
 */

/* PCIE_PWR_METHOD_* */
u32 power_mgmt_method;

/* PCIE_CONFIG_FLAG_* */

/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0

/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 */
struct ce_pipe_config {
/* NOTE(review): the member declarations and closing brace of this struct
 * were lost in extraction — restore them from the original header before
 * compiling. */
/*
 * Directions for interconnect pipe configuration.
 * These definitions may be used during configuration and are shared
 * between Host and Target.
 *
 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
 * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
 * over the interconnect.
 */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3  /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
/* NOTE(review): the member declarations and closing brace of this struct
 * were lost in extraction — restore them from the original header before
 * compiling. */
/*
 * Hardware/firmware capability flags tested via the ar_pci->features
 * bitmap (see DECLARE_BITMAP/test_bit usage below).
 *
 * NOTE(review): the closing brace of this enum was lost in extraction and
 * has been restored.  The "WARKAROUND" misspelling is preserved
 * deliberately — it is part of the identifier every caller uses.
 */
enum ath10k_pci_features {
	ATH10K_PCI_FEATURE_MSI_X		= 0,
	ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND	= 1,

	/* keep last */
	ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
struct hif_ce_pipe_info {
	/* Handle of underlying Copy Engine */
	struct ce_state *ce_hdl;

	/* Our pipe number; facilitiates use of pipe_info ptrs. */
	/* NOTE(review): the pipe-number member declaration itself was lost
	 * in extraction — restore it from the original header. */

	/* Convenience back pointer to hif_ce_state. */
	struct ath10k *hif_ce_state;

	/* protects compl_free and num_send_allowed */
	spinlock_t pipe_lock;

	/* List of free CE completion slots */
	struct list_head compl_free;

	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	/* Back pointer to the bus-level (PCI) state */
	struct ath10k_pci *ar_pci;

	/* Per-pipe tasklet for deferred interrupt work */
	struct tasklet_struct intr;
/* NOTE(review): the closing brace of this struct was lost in extraction. */
/*
 * NOTE(review): the "struct ath10k_pci {" opener and several member
 * declarations were lost in extraction; the surviving member lines are
 * kept verbatim below.  Restore the missing declarations from the
 * original header before compiling.
 */
/* Underlying PCI device */
struct pci_dev *pdev;

/* Capability flags, indexed by enum ath10k_pci_features */
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

/*
 * Number of MSI interrupts granted, 0 --> using legacy PCI line
 * NOTE(review): the member declaration itself was lost in extraction.
 */

/* Tasklet servicing CE interrupts */
struct tasklet_struct intr_tq;

/* Tasklet handling the firmware-error MSI */
struct tasklet_struct msi_fw_err;

/* Number of Copy Engines supported */
unsigned int ce_count;

/* presumably counts outstanding wake requests keeping the Target awake
 * (see ath10k_pci_wake/ath10k_pci_sleep) — TODO confirm */
atomic_t keep_awake_count;

/* List of CE completions to be processed */
struct list_head compl_process;

/* protects compl_processing and compl_process */
spinlock_t compl_lock;

/* Set while completions are being drained (guarded by compl_lock) */
bool compl_processing;

/* Per-pipe state, one slot per Copy Engine */
struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];

/* Callbacks currently registered by the HIF layer */
struct ath10k_hif_cb msg_callbacks_current;

/* Target address used to signal a pending firmware event */
u32 fw_indicator_address;

/* Copy Engine used for Diagnostic Accesses */
struct ce_state *ce_diag;

/* FIXME: document what this really protects */
/* NOTE(review): the lock declaration this FIXME refers to was lost in
 * extraction. */

/* Map CE id to ce_state */
struct ce_state *ce_id_to_state[CE_COUNT_MAX];

/* makes sure that dummy reads are atomic */
spinlock_t hw_v1_workaround_lock;
/* NOTE(review): the closing brace of this struct was lost in extraction. */
/*
 * Accessor for the PCI-specific state hanging off the core ath10k device.
 * NOTE(review): the function body and braces were lost in extraction —
 * presumably it returns the driver-private area of @ar; restore the body
 * from the original header before compiling.
 */
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
246 static inline u32
ath10k_pci_reg_read32(void __iomem
*mem
, u32 addr
)
248 return ioread32(mem
+ PCIE_LOCAL_BASE_ADDRESS
+ addr
);
251 static inline void ath10k_pci_reg_write32(void __iomem
*mem
, u32 addr
, u32 val
)
253 iowrite32(val
, mem
+ PCIE_LOCAL_BASE_ADDRESS
+ addr
);
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */

/* CDC workaround ("WAR") constants: magic marker value and the data CE id */
#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device; but that's not guaranteed.
 *
 * NOTE: @addr is evaluated twice — do not pass expressions with side
 * effects.  The @ar argument is unused in the expansion.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
	(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
	 0x100000 | ((addr) & 0xfffff))

/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/*
 * This API allows the Host to access Target registers directly
 * and relatively efficiently over PCIe.
 * This allows the Host to avoid extra overhead associated with
 * sending a message to firmware and waiting for a response message
 * from firmware, as is done on other interconnects.
 *
 * Yet there is some complexity with direct accesses because the
 * Target's power state is not known a priori. The Host must issue
 * special PCIe reads/writes in order to explicitly wake the Target
 * and to verify that it is awake and will remain awake.
 *
 * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
 * These calls must be bracketed by ath10k_pci_wake and
 * ath10k_pci_sleep. A single BEGIN/END pair is adequate for
 * multiple READ/WRITE operations.
 *
 * Use ath10k_pci_wake to put the Target in a state in
 * which it is legal for the Host to directly access it. This
 * may involve waking the Target from a low power state, which
 * may take up to 2Ms!
 *
 * Use ath10k_pci_sleep to tell the Target that as far as
 * this code path is concerned, it no longer needs to remain
 * directly accessible. BEGIN/END is under a reference counter;
 * multiple code paths may issue BEGIN/END on a single targid.
 */
308 static inline void ath10k_pci_write32(struct ath10k
*ar
, u32 offset
,
311 struct ath10k_pci
*ar_pci
= ath10k_pci_priv(ar
);
312 void __iomem
*addr
= ar_pci
->mem
;
314 if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND
, ar_pci
->features
)) {
315 unsigned long irq_flags
;
317 spin_lock_irqsave(&ar_pci
->hw_v1_workaround_lock
, irq_flags
);
319 ioread32(addr
+offset
+4); /* 3rd read prior to write */
320 ioread32(addr
+offset
+4); /* 2nd read prior to write */
321 ioread32(addr
+offset
+4); /* 1st read prior to write */
322 iowrite32(value
, addr
+offset
);
324 spin_unlock_irqrestore(&ar_pci
->hw_v1_workaround_lock
,
327 iowrite32(value
, addr
+offset
);
331 static inline u32
ath10k_pci_read32(struct ath10k
*ar
, u32 offset
)
333 struct ath10k_pci
*ar_pci
= ath10k_pci_priv(ar
);
335 return ioread32(ar_pci
->mem
+ offset
);
/* Non-zero when Target power-save handling is in effect; gates the
 * wake/sleep helpers below (defined elsewhere in the driver). */
extern unsigned int ath10k_target_ps;
/* Wake the Target for direct access (may take up to ~2ms; see the
 * usage comment above); called via ath10k_pci_wake(). */
void ath10k_do_pci_wake(struct ath10k *ar);
/* Tell the Target it may return to a low power state; called via
 * ath10k_pci_sleep(). */
void ath10k_do_pci_sleep(struct ath10k *ar);
343 static inline void ath10k_pci_wake(struct ath10k
*ar
)
345 if (ath10k_target_ps
)
346 ath10k_do_pci_wake(ar
);
349 static inline void ath10k_pci_sleep(struct ath10k
*ar
)
351 if (ath10k_target_ps
)
352 ath10k_do_pci_sleep(ar
);