/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _PCI_H_
#define _PCI_H_

#include <linux/interrupt.h>

#include "hw.h"
#include "ce.h"
#define REG_DUMP_COUNT_QCA988X 60
/*
 * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
 */
#define DIAG_TRANSFER_LIMIT 2048
struct bmi_xfer {
	struct completion done;
	bool wait_for_resp;
	u32 resp_len;
};
struct ath10k_pci_compl {
	struct list_head list;
	int send_or_recv;
	struct ce_state *ce_state;
	struct hif_ce_pipe_info *pipe_info;
	void *transfer_context;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;
};
/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0
#define HIF_CE_COMPLETE_SEND 1
#define HIF_CE_COMPLETE_RECV 2
/*
 * PCI-specific Target state
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 *
 * Much of this may be of interest to the Host so
 * HOST_INTEREST->hi_interconnect_state points here
 * (and all members are 32-bit quantities in order to
 * facilitate Host access). In particular, Host software is
 * required to initialize pipe_cfg_addr and svc_to_pipe_map.
 */
struct pcie_state {
	/* Pipe configuration Target address */
	/* NB: ce_pipe_config[CE_COUNT] */
	u32 pipe_cfg_addr;

	/* Service to pipe map Target address */
	/* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
	u32 svc_to_pipe_map;

	/* number of MSI interrupts requested */
	u32 msi_requested;

	/* number of MSI interrupts granted */
	u32 msi_granted;

	/* Message Signalled Interrupt address */
	u32 msi_addr;

	/* Base data */
	u32 msi_data;

	/*
	 * Data for firmware interrupt;
	 * MSI data for other interrupts are
	 * in various SoC registers
	 */
	u32 msi_fw_intr_data;

	/* PCIE_PWR_METHOD_* */
	u32 power_mgmt_method;

	/* PCIE_CONFIG_FLAG_* */
	u32 config_flags;
};
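/*
 * Illustrative sketch only (the helper names below are assumptions, not
 * part of this header): the Host locates this structure in Target memory
 * via HOST_INTEREST->hi_interconnect_state and then fills in the two
 * members it is required to initialize.
 *
 *	u32 pcie_state_addr = host_read_hi_interconnect_state(ar);
 *
 *	host_write_target_u32(ar, pcie_state_addr +
 *			      offsetof(struct pcie_state, pipe_cfg_addr),
 *			      target_ce_config_addr);
 *	host_write_target_u32(ar, pcie_state_addr +
 *			      offsetof(struct pcie_state, svc_to_pipe_map),
 *			      target_svc_map_addr);
 */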
/* PCIE_CONFIG_FLAG definitions */
#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001
/* Host software's Copy Engine configuration. */
#define CE_ATTR_FLAGS 0
/*
 * Configuration information for a Copy Engine pipe.
 * Passed from Host to Target during startup (one per CE).
 *
 * NOTE: Structure is shared between Host software and Target firmware!
 */
struct ce_pipe_config {
	u32 pipenum;
	u32 pipedir;
	u32 nentries;
	u32 nbytes_max;
	u32 flags;
	u32 reserved;
};
/*
 * Directions for interconnect pipe configuration.
 * These definitions may be used during configuration and are shared
 * between Host and Target.
 *
 * Pipe Directions are relative to the Host, so PIPEDIR_IN means
 * "coming IN over air through Target to Host" as with a WiFi Rx operation.
 * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
 * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
 * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
 * over the interconnect.
 */
#define PIPEDIR_NONE    0
#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
#define PIPEDIR_INOUT   3  /* bidirectional */
/* Establish a mapping between a service/direction and a pipe. */
struct service_to_pipe {
	u32 service_id;
	u32 pipedir;
	u32 pipenum;
};
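/*
 * For illustration only: a minimal sketch of how a service/direction pair
 * might be mapped to a CE pipe using the definitions above. The service id
 * macro and pipe numbers below are assumptions, not taken from this header.
 *
 *	static const struct service_to_pipe example_svc_map[] = {
 *		{ ATH10K_HTC_SVC_ID_WMI_CONTROL, PIPEDIR_OUT, 3 },
 *		{ ATH10K_HTC_SVC_ID_WMI_CONTROL, PIPEDIR_IN,  2 },
 *	};
 */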
enum ath10k_pci_features {
	ATH10K_PCI_FEATURE_MSI_X = 0,
	ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
	ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,

	ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
struct hif_ce_pipe_info {
	/* Handle of underlying Copy Engine */
	struct ce_state *ce_hdl;

	/* Our pipe number; facilitates use of pipe_info ptrs. */
	u8 pipe_num;

	/* Convenience back pointer to hif_ce_state. */
	struct ath10k *hif_ce_state;

	/* protects compl_free and num_send_allowed */
	spinlock_t pipe_lock;

	/* List of free CE completion slots */
	struct list_head compl_free;

	/* Limit the number of outstanding send requests. */
	int num_sends_allowed;

	struct ath10k_pci *ar_pci;
	struct tasklet_struct intr;
};
struct ath10k_pci {
	struct pci_dev *pdev;
	struct device *dev;
	struct ath10k *ar;
	void __iomem *mem;

	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);

	/*
	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
	 * interrupts.
	 */
	int num_msi_intrs;

	struct tasklet_struct intr_tq;
	struct tasklet_struct msi_fw_err;

	/* Number of Copy Engines supported */
	unsigned int ce_count;

	atomic_t keep_awake_count;

	/* List of CE completions to be processed */
	struct list_head compl_process;

	/* protects compl_processing and compl_process */
	spinlock_t compl_lock;

	bool compl_processing;

	struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];

	struct ath10k_hif_cb msg_callbacks_current;

	/* Target address used to signal a pending firmware event */
	u32 fw_indicator_address;

	/* Copy Engine used for Diagnostic Accesses */
	struct ce_state *ce_diag;

	/* FIXME: document what this really protects */
	spinlock_t ce_lock;

	/* Map CE id to ce_state */
	struct ce_state *ce_id_to_state[CE_COUNT_MAX];

	/* makes sure that dummy reads are atomic */
	spinlock_t hw_v1_workaround_lock;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
{
	return ar->hif.priv;
}
static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
{
	return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
{
	iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */
#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4
/*
 * TODO: Should be a function call specific to each Target-type.
 * This convoluted macro converts from Target CPU Virtual Address Space to CE
 * Address Space. As part of this process, we conservatively fetch the current
 * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
 * for this device; but that's not guaranteed.
 */
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)			\
	(((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|			\
	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |				\
	 0x100000 | ((addr) & 0xfffff))
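/*
 * Worked example of the conversion above, assuming a hypothetical
 * CORE_CTRL register value of 0x00000443 (only its low 11 bits are used)
 * and a Target CPU address of 0x00409abc:
 *
 *	(0x443 << 21)            = 0x88600000
 *	| 0x100000               = 0x88700000
 *	| (0x00409abc & 0xfffff) = 0x88709abc
 *
 * i.e. Target CPU address 0x00409abc maps to CE-space address 0x88709abc.
 */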
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
/*
 * This API allows the Host to access Target registers directly
 * and relatively efficiently over PCIe.
 * This allows the Host to avoid extra overhead associated with
 * sending a message to firmware and waiting for a response message
 * from firmware, as is done on other interconnects.
 *
 * Yet there is some complexity with direct accesses because the
 * Target's power state is not known a priori. The Host must issue
 * special PCIe reads/writes in order to explicitly wake the Target
 * and to verify that it is awake and will remain awake.
 *
 * Usage:
 *
 *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
 *   These calls must be bracketed by ath10k_pci_wake and
 *   ath10k_pci_sleep. A single BEGIN/END pair is adequate for
 *   multiple READ/WRITE operations.
 *
 *   Use ath10k_pci_wake to put the Target in a state in
 *   which it is legal for the Host to directly access it. This
 *   may involve waking the Target from a low power state, which
 *   may take up to 2Ms!
 *
 *   Use ath10k_pci_sleep to tell the Target that as far as
 *   this code path is concerned, it no longer needs to remain
 *   directly accessible. BEGIN/END is under a reference counter;
 *   multiple code paths may issue BEGIN/END on a single targid.
 */
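/*
 * A minimal usage sketch of the wake/access/sleep bracketing described
 * above. The register offset FW_INDICATOR_ADDRESS is an assumption here,
 * used purely for illustration:
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
 *	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
 *	ath10k_pci_sleep(ar);
 */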
static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
				      u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *addr = ar_pci->mem;

	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
		unsigned long irq_flags;

		spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);

		ioread32(addr+offset+4); /* 3rd read prior to write */
		ioread32(addr+offset+4); /* 2nd read prior to write */
		ioread32(addr+offset+4); /* 1st read prior to write */
		iowrite32(value, addr+offset);

		spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
				       irq_flags);
	} else {
		iowrite32(value, addr+offset);
	}
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ioread32(ar_pci->mem + offset);
}
void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_wake(ar);
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}

#endif /* _PCI_H_ */