/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18 #include <linux/coresight.h>
19 #include <linux/dma-mapping.h>
20 #include "coresight-priv.h"
21 #include "coresight-tmc.h"
/*
 * tmc_etr_enable_hw - program and start a TMC configured as an ETR sink.
 *
 * Sets the RAM size and circular-buffer mode, configures the AXI write
 * (and, when the capability is present, read) cache attributes, points
 * the DMA base address at drvdata->paddr and enables formatting/flush
 * behaviour before turning the TMC on.
 *
 * NOTE(review): callers in this file invoke this with drvdata->spinlock
 * held and drvdata->vaddr/paddr already set up — confirm before reuse.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	/* Unlock the CoreSight register file before writing to the HW */
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* TMC_RSZ is expressed in 32-bit words, hence the division by 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/* Read-modify-write of the AXI control register */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, drvdata->paddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	/* Enable formatter, trigger insertion and flush-on-trigger events */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etr_dump_hw - derive drvdata->buf/len from the HW write pointer.
 *
 * Must be called with the TMC stopped/flushed.  When the status register
 * reports FULL the write pointer has wrapped: the oldest data starts at
 * RWP and the whole buffer is valid.  Otherwise the data starts at the
 * buffer base and ends at RWP.
 */
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	const u32 *barrier;
	u32 val;
	u32 *temp;
	u64 rwp;

	rwp = tmc_read_rwp(drvdata);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		/* Translate the bus address in RWP to a CPU-visible offset */
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;

		/*
		 * Overwrite the beginning of the (wrapped, hence partially
		 * overwritten) data with barrier packets so that decoders
		 * can re-synchronise on the stream.
		 */
		barrier = barrier_pkt;
		temp = (u32 *)drvdata->buf;

		while (*barrier) {
			*temp = *barrier;
			temp++;
			barrier++;
		}
	} else {
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}
/*
 * tmc_etr_disable_hw - flush, optionally dump, then stop the TMC-ETR.
 *
 * NOTE(review): callers in this file hold drvdata->spinlock around this.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Drain any in-flight trace data before touching the pointers */
	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR as a sink from sysFS.
 *
 * Allocates the DMA trace buffer on first use (outside the spinlock,
 * since dma_alloc_coherent() can sleep), then enables the hardware.
 * Multiple sysFS writers may share an already-enabled sink.
 *
 * Returns 0 on success, -ENOMEM if the buffer allocation fails and
 * -EBUSY if a buffer read is in progress.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
/*
 * tmc_enable_etr_sink_perf - enable the ETR as a sink for the perf mode.
 *
 * Unlike sysFS mode, perf allows only a single writer and may not take
 * over a sink that is already in use.  Returns 0 on success, -EINVAL if
 * the sink is being read or already operated from another mode.
 */
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In Perf mode there can be only one writer per sink.  There
	 * is also no need to continue if the ETR is already operated
	 * from sysFS.
	 */
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
223 static int tmc_enable_etr_sink(struct coresight_device
*csdev
, u32 mode
)
227 return tmc_enable_etr_sink_sysfs(csdev
);
229 return tmc_enable_etr_sink_perf(csdev
);
232 /* We shouldn't be here */
/*
 * tmc_disable_etr_sink - disable the ETR sink if it is currently active.
 *
 * Bails out without touching the hardware when a buffer read is in
 * progress; otherwise stops the TMC (if enabled) and marks it disabled.
 */
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
/* Sink operations registered with the CoreSight framework for the ETR. */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};
/* Top-level CoreSight ops for the ETR configuration (sink-only device). */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
/*
 * tmc_read_prepare_etr - prepare the trace buffer for reading from sysFS.
 *
 * Stops the TMC if it was running in sysFS mode and flags the device as
 * being read so that concurrent enable/disable requests back off.
 *
 * Returns 0 on success, -EBUSY if a read is already in progress and
 * -EINVAL if the device is the wrong type, operated from perf, or the
 * buffer was already consumed.
 */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
/*
 * tmc_read_unprepare_etr - end a sysFS read session on the trace buffer.
 *
 * If the session was tracing in sysFS mode the hardware is re-enabled
 * with the same buffer; otherwise the buffer is released.  The actual
 * dma_free_coherent() happens after the spinlock is dropped since it
 * may not be called from atomic context.
 *
 * Returns 0 on success, -EINVAL for the wrong configuration type.
 */
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* RE-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
		 * so we don't have to explicitly clear it. Also, since the
		 * tracer is still enabled drvdata::buf can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory out side of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}