// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

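/*
 * __tmc_etb_enable_hw() - program the TMC for circular-buffer (ETB) mode.
 * This helper only touches the device registers; judging by the call sites
 * in this file, serialisation is expected to be handled by the callers.
 */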
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	/* Only program the hardware if the claim succeeded */
	if (rc)
		return rc;

	__tmc_etb_enable_hw(drvdata);
	return 0;
}

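/*
 * tmc_etb_dump_hw() - drain the TMC RAM into drvdata->buf by reading the
 * RRD register until the hardware returns the 0xFFFFFFFF "empty" marker.
 * If the buffer wrapped around, a barrier packet is inserted so decoders
 * can re-synchronise.
 */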
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
}

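/*
 * __tmc_etf_enable_hw() - program the TMC as a hardware FIFO (link mode).
 * Unlike the ETB path above, no trigger counter is programmed and the
 * buffer watermark is cleared.
 */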
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	/* Only program the hardware if the claim succeeded */
	if (rc)
		return rc;

	__tmc_etf_enable_hw(drvdata);
	return 0;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with a maximum
 * limit of @len, updating the @bufpp on where to find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the len to available size @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;

	*bufpp = drvdata->buf + pos;
	return actual;
}

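/*
 * tmc_enable_etf_sink_sysfs() - enable the ETB/ETF as a sink from sysFS.
 * The backing buffer is allocated outside of the spinlock; if another
 * writer raced in and the sink is already enabled, only the reference
 * count is bumped.
 */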
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

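/*
 * tmc_enable_etf_sink_perf() - enable the ETB/ETF as a sink on behalf of
 * the perf framework.  A sink can only service one process at a time, so
 * the owner pid recorded in the cs_buffers is checked before the hardware
 * is touched.
 */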
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

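/*
 * tmc_enable_etf_sink() - mode dispatcher for the sink enable callback,
 * routing to the sysFS or perf variant above.
 */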
static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_read(&csdev->refcnt[0]) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt[0]);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

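/*
 * tmc_set_etf_buffer() - derive the current page and the offset within that
 * page from the perf handle's head, so that tmc_update_etf_buffer() knows
 * where to start writing.
 */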
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

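/*
 * tmc_update_etf_buffer() - copy whatever the TMC captured into the perf
 * AUX pages and return the number of bytes made available to user space.
 */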
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the
	 * latest traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written.  User space function cs_etm_find_snapshot()
	 * will figure out how many bytes to get from the AUX buffer based on
	 * the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};

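/*
 * tmc_read_prepare_etb() - get the device ready for reading the trace
 * buffer through the /dev node: stop the TMC if it is running in sysFS
 * mode and flag the device as being read so writers stay away.
 */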
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

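/*
 * tmc_read_unprepare_etb() - counterpart of tmc_read_prepare_etb(): either
 * re-arm the TMC if a sysFS trace run is still in progress, or release the
 * trace buffer now that user space has consumed it.
 */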
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf can't be
		 * NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}