// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
7 #include <linux/coresight.h>
8 #include <linux/dma-mapping.h>
9 #include "coresight-priv.h"
10 #include "coresight-tmc.h"
12 static void tmc_etr_enable_hw(struct tmc_drvdata
*drvdata
)
16 /* Zero out the memory to help with debug */
17 memset(drvdata
->vaddr
, 0, drvdata
->size
);
19 CS_UNLOCK(drvdata
->base
);
21 /* Wait for TMCSReady bit to be set */
22 tmc_wait_for_tmcready(drvdata
);
24 writel_relaxed(drvdata
->size
/ 4, drvdata
->base
+ TMC_RSZ
);
25 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER
, drvdata
->base
+ TMC_MODE
);
27 axictl
= readl_relaxed(drvdata
->base
+ TMC_AXICTL
);
28 axictl
&= ~TMC_AXICTL_CLEAR_MASK
;
29 axictl
|= (TMC_AXICTL_PROT_CTL_B1
| TMC_AXICTL_WR_BURST_16
);
30 axictl
|= TMC_AXICTL_AXCACHE_OS
;
32 if (tmc_etr_has_cap(drvdata
, TMC_ETR_AXI_ARCACHE
)) {
33 axictl
&= ~TMC_AXICTL_ARCACHE_MASK
;
34 axictl
|= TMC_AXICTL_ARCACHE_OS
;
37 writel_relaxed(axictl
, drvdata
->base
+ TMC_AXICTL
);
38 tmc_write_dba(drvdata
, drvdata
->paddr
);
40 * If the TMC pointers must be programmed before the session,
41 * we have to set it properly (i.e, RRP/RWP to base address and
44 if (tmc_etr_has_cap(drvdata
, TMC_ETR_SAVE_RESTORE
)) {
45 tmc_write_rrp(drvdata
, drvdata
->paddr
);
46 tmc_write_rwp(drvdata
, drvdata
->paddr
);
47 sts
= readl_relaxed(drvdata
->base
+ TMC_STS
) & ~TMC_STS_FULL
;
48 writel_relaxed(sts
, drvdata
->base
+ TMC_STS
);
51 writel_relaxed(TMC_FFCR_EN_FMT
| TMC_FFCR_EN_TI
|
52 TMC_FFCR_FON_FLIN
| TMC_FFCR_FON_TRIG_EVT
|
53 TMC_FFCR_TRIGON_TRIGIN
,
54 drvdata
->base
+ TMC_FFCR
);
55 writel_relaxed(drvdata
->trigger_cntr
, drvdata
->base
+ TMC_TRG
);
56 tmc_enable_hw(drvdata
);
58 CS_LOCK(drvdata
->base
);
61 static void tmc_etr_dump_hw(struct tmc_drvdata
*drvdata
)
68 rwp
= tmc_read_rwp(drvdata
);
69 val
= readl_relaxed(drvdata
->base
+ TMC_STS
);
72 * Adjust the buffer to point to the beginning of the trace data
73 * and update the available trace data.
75 if (val
& TMC_STS_FULL
) {
76 drvdata
->buf
= drvdata
->vaddr
+ rwp
- drvdata
->paddr
;
77 drvdata
->len
= drvdata
->size
;
79 barrier
= barrier_pkt
;
80 temp
= (u32
*)drvdata
->buf
;
89 drvdata
->buf
= drvdata
->vaddr
;
90 drvdata
->len
= rwp
- drvdata
->paddr
;
94 static void tmc_etr_disable_hw(struct tmc_drvdata
*drvdata
)
96 CS_UNLOCK(drvdata
->base
);
98 tmc_flush_and_stop(drvdata
);
100 * When operating in sysFS mode the content of the buffer needs to be
101 * read before the TMC is disabled.
103 if (drvdata
->mode
== CS_MODE_SYSFS
)
104 tmc_etr_dump_hw(drvdata
);
105 tmc_disable_hw(drvdata
);
107 CS_LOCK(drvdata
->base
);
110 static int tmc_enable_etr_sink_sysfs(struct coresight_device
*csdev
)
115 void __iomem
*vaddr
= NULL
;
116 dma_addr_t paddr
= 0;
117 struct tmc_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
120 * If we don't have a buffer release the lock and allocate memory.
121 * Otherwise keep the lock and move along.
123 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
124 if (!drvdata
->vaddr
) {
125 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
128 * Contiguous memory can't be allocated while a spinlock is
129 * held. As such allocate memory here and free it if a buffer
130 * has already been allocated (from a previous session).
132 vaddr
= dma_alloc_coherent(drvdata
->dev
, drvdata
->size
,
137 /* Let's try again */
138 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
141 if (drvdata
->reading
) {
147 * In sysFS mode we can have multiple writers per sink. Since this
148 * sink is already enabled no memory is needed and the HW need not be
151 if (drvdata
->mode
== CS_MODE_SYSFS
)
155 * If drvdata::vaddr == NULL, use the memory allocated above.
156 * Otherwise a buffer still exists from a previous session, so
159 if (drvdata
->vaddr
== NULL
) {
161 drvdata
->vaddr
= vaddr
;
162 drvdata
->paddr
= paddr
;
163 drvdata
->buf
= drvdata
->vaddr
;
166 drvdata
->mode
= CS_MODE_SYSFS
;
167 tmc_etr_enable_hw(drvdata
);
169 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
171 /* Free memory outside the spinlock if need be */
173 dma_free_coherent(drvdata
->dev
, drvdata
->size
, vaddr
, paddr
);
176 dev_info(drvdata
->dev
, "TMC-ETR enabled\n");
181 static int tmc_enable_etr_sink_perf(struct coresight_device
*csdev
)
185 struct tmc_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
187 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
188 if (drvdata
->reading
) {
194 * In Perf mode there can be only one writer per sink. There
195 * is also no need to continue if the ETR is already operated
198 if (drvdata
->mode
!= CS_MODE_DISABLED
) {
203 drvdata
->mode
= CS_MODE_PERF
;
204 tmc_etr_enable_hw(drvdata
);
206 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
211 static int tmc_enable_etr_sink(struct coresight_device
*csdev
, u32 mode
)
215 return tmc_enable_etr_sink_sysfs(csdev
);
217 return tmc_enable_etr_sink_perf(csdev
);
220 /* We shouldn't be here */
224 static void tmc_disable_etr_sink(struct coresight_device
*csdev
)
227 struct tmc_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
229 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
230 if (drvdata
->reading
) {
231 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
235 /* Disable the TMC only if it needs to */
236 if (drvdata
->mode
!= CS_MODE_DISABLED
) {
237 tmc_etr_disable_hw(drvdata
);
238 drvdata
->mode
= CS_MODE_DISABLED
;
241 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
243 dev_info(drvdata
->dev
, "TMC-ETR disabled\n");
246 static const struct coresight_ops_sink tmc_etr_sink_ops
= {
247 .enable
= tmc_enable_etr_sink
,
248 .disable
= tmc_disable_etr_sink
,
251 const struct coresight_ops tmc_etr_cs_ops
= {
252 .sink_ops
= &tmc_etr_sink_ops
,
255 int tmc_read_prepare_etr(struct tmc_drvdata
*drvdata
)
260 /* config types are set a boot time and never change */
261 if (WARN_ON_ONCE(drvdata
->config_type
!= TMC_CONFIG_TYPE_ETR
))
264 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
265 if (drvdata
->reading
) {
270 /* Don't interfere if operated from Perf */
271 if (drvdata
->mode
== CS_MODE_PERF
) {
276 /* If drvdata::buf is NULL the trace data has been read already */
277 if (drvdata
->buf
== NULL
) {
282 /* Disable the TMC if need be */
283 if (drvdata
->mode
== CS_MODE_SYSFS
)
284 tmc_etr_disable_hw(drvdata
);
286 drvdata
->reading
= true;
288 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
293 int tmc_read_unprepare_etr(struct tmc_drvdata
*drvdata
)
297 void __iomem
*vaddr
= NULL
;
299 /* config types are set a boot time and never change */
300 if (WARN_ON_ONCE(drvdata
->config_type
!= TMC_CONFIG_TYPE_ETR
))
303 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
305 /* RE-enable the TMC if need be */
306 if (drvdata
->mode
== CS_MODE_SYSFS
) {
308 * The trace run will continue with the same allocated trace
309 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
310 * so we don't have to explicitly clear it. Also, since the
311 * tracer is still enabled drvdata::buf can't be NULL.
313 tmc_etr_enable_hw(drvdata
);
316 * The ETR is not tracing and the buffer was just read.
317 * As such prepare to free the trace buffer.
319 vaddr
= drvdata
->vaddr
;
320 paddr
= drvdata
->paddr
;
321 drvdata
->buf
= drvdata
->vaddr
= NULL
;
324 drvdata
->reading
= false;
325 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
327 /* Free allocated memory out side of the spinlock */
329 dma_free_coherent(drvdata
->dev
, drvdata
->size
, vaddr
, paddr
);