/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* Buffer size is programmed in 32-bit words */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_16;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	/* Base address of the DMA buffer, low and high words */
	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

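/*
 * tmc_etr_dump_hw - snapshot the state of the trace buffer
 *
 * Derive drvdata::buf and drvdata::len from the write pointer (RWP) and the
 * Full status bit so that the read path knows where valid trace data starts
 * and how much of it there is.
 */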
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;
	} else {
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

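/*
 * tmc_enable_etr_sink_sysfs - enable the ETR when driven from sysFS
 *
 * A trace buffer is allocated with the spinlock released and handed over to
 * the ETR only if no buffer is already in place from a previous session.  If
 * the sink is already enabled in sysFS mode nothing needs to be done, since
 * several sources can share the same sink in that mode.
 */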
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In Perf mode there can be only one writer per sink.  There
	 * is also no need to continue if the ETR is already operated
	 * from sysFS.
	 */
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

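/*
 * tmc_disable_etr_sink - stop the ETR unless a buffer read is in progress
 *
 * If user space is in the middle of reading the trace buffer the hardware is
 * left alone; otherwise the ETR is stopped and the mode returned to
 * CS_MODE_DISABLED.
 */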
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to be */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

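/*
 * tmc_read_prepare_etr() and tmc_read_unprepare_etr() bracket a user space
 * read of the trace buffer through the TMC character device: the first stops
 * the ETR (when driven from sysFS) and marks the buffer as being read, the
 * second restarts the trace session or releases the buffer once the read is
 * done.
 */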
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
		 * so we don't have to explicitly clear it. Also, since the
		 * tracer is still enabled drvdata::buf can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}