// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

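/*
 * Drain the on-chip trace RAM into drvdata->buf.  The RAM Read Data
 * register (TMC_RRD) returns 0xFFFFFFFF once the buffer has been fully
 * drained; until then it is read drvdata->memwidth words at a time.  If
 * the buffer wrapped around (TMC_STS_FULL), the leading words are
 * replaced with barrier packets so that decoders can re-synchronise
 * mid-stream.
 */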
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        bool lost = false;
        char *bufp;
        const u32 *barrier;
        u32 read_data, status;
        int i;

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL)
                lost = true;

        bufp = drvdata->buf;
        drvdata->len = 0;
        barrier = barrier_pkt;
        while (1) {
                for (i = 0; i < drvdata->memwidth; i++) {
                        read_data = readl_relaxed(drvdata->base + TMC_RRD);
                        if (read_data == 0xFFFFFFFF)
                                return;

                        if (lost && *barrier) {
                                read_data = *barrier;
                                barrier++;
                        }

                        memcpy(bufp, &read_data, 4);
                        bufp += 4;
                        drvdata->len += 4;
                }
        }
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

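/*
 * Unlike the circular-buffer (ETB) configuration above, hardware FIFO
 * mode does not capture to the trace RAM: the TMC merely buffers and
 * forwards trace downstream, which is how an ETF behaves when used as a
 * link rather than as a sink.
 */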
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

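/*
 * A typical sysFS trace session looks roughly like this (a sketch only;
 * device names depend on the platform's device tree):
 *
 *   echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
 *   echo 1 > /sys/bus/coresight/devices/<source>/enable_source
 *   echo 0 > /sys/bus/coresight/devices/<source>/enable_source
 *   dd if=/dev/20010000.etf of=trace.bin
 *
 * Reading the /dev node goes through tmc_read_prepare_etb() and
 * tmc_read_unprepare_etb() below.
 */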
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        char *buf = NULL;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->buf) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /* Allocating the memory here while outside of the spinlock */
                buf = kzalloc(drvdata->size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                goto out;

        /*
         * If drvdata::buf isn't NULL, memory was allocated for a previous
         * trace run but wasn't read.  If so simply zero-out the memory.
         * Otherwise use the memory allocated above.
         *
         * The memory is freed when users read the buffer using the
         * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
         * details.
         */
        if (drvdata->buf) {
                memset(drvdata->buf, 0, drvdata->size);
        } else {
                used = true;
                drvdata->buf = buf;
        }

        drvdata->mode = CS_MODE_SYSFS;
        tmc_etb_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used)
                kfree(buf);

        return ret;
}

static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * In Perf mode there can be only one writer per sink.  There
         * is also no need to continue if the ETB/ETF is already operated
         * from sysFS.
         */
        if (drvdata->mode != CS_MODE_DISABLED) {
                ret = -EINVAL;
                goto out;
        }

        drvdata->mode = CS_MODE_PERF;
        tmc_etb_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
        int ret;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = tmc_enable_etf_sink_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = tmc_enable_etf_sink_perf(csdev);
                break;
        /* We shouldn't be here */
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
        return 0;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        /* Disable the TMC only if it needs to be */
        if (drvdata->mode != CS_MODE_DISABLED) {
                tmc_etb_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
        }

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
                               int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        tmc_etf_enable_hw(drvdata);
        drvdata->mode = CS_MODE_SYSFS;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETF enabled\n");
        return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
                                 int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        tmc_etf_disable_hw(drvdata);
        drvdata->mode = CS_MODE_DISABLED;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
                                  void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void tmc_free_etf_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

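/*
 * Worked example of the head arithmetic below (illustrative values):
 * with nr_pages = 4 and 4K pages the AUX buffer spans 16384 bytes, so a
 * handle->head of 20000 is wrapped to 20000 & 16383 = 3616, giving
 * cur = 0 (page 0) and offset = 3616 within that page.
 */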
static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = sink_config;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
                                          struct perf_output_handle *handle,
                                          void *sink_config)
{
        long size = 0;
        struct cs_buffers *buf = sink_config;

        if (buf) {
                /*
                 * In snapshot mode ->data_size holds the new address of the
                 * ring buffer's head.  The size itself is the whole address
                 * range since we want the latest information.
                 */
                if (buf->snapshot)
                        handle->head = local_xchg(&buf->data_size,
                                                  buf->nr_pages << PAGE_SHIFT);
                /*
                 * Tell the tracer PMU how much we got in this run and if
                 * something went wrong along the way.  Nobody else can use
                 * this cs_buffers instance until we are done.  As such
                 * resetting parameters here and squaring off with the ring
                 * buffer API in the tracer PMU is fine.
                 */
                size = local_xchg(&buf->data_size, 0);
        }

        return size;
}

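/*
 * Copy whatever the trace RAM holds into the perf AUX pages.  When more
 * data is available than the handle can take, the RAM read pointer is
 * advanced so that only the most recent bytes are kept, and the new RRP
 * value must stay aligned as described below.  For instance, with a mask
 * of GENMASK(31, 5) a handle->size of 0x1234 yields to_read = 0x1220.
 */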
static void tmc_update_etf_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle,
                                  void *sink_config)
{
        bool lost = false;
        int i, cur;
        const u32 *barrier;
        u32 *buf_ptr;
        u64 read_ptr, write_ptr;
        u32 status, to_read;
        unsigned long offset;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
                return;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);

        read_ptr = tmc_read_rrp(drvdata);
        write_ptr = tmc_read_rwp(drvdata);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                lost = true;
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size).  If so advance the RRP so that we
         * get the latest trace data.
         */
        if (to_read > handle->size) {
                u32 mask = 0;

                /*
                 * The value written to RRP must be byte-address aligned to
                 * the width of the trace memory databus _and_ to a frame
                 * boundary (16 byte), whichever is the biggest. For example,
                 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
                 * LSBs must be 0s. For 256-bit wide trace memory, the five
                 * LSBs must be 0s.
                 */
                switch (drvdata->memwidth) {
                case TMC_MEM_INTF_WIDTH_32BITS:
                case TMC_MEM_INTF_WIDTH_64BITS:
                case TMC_MEM_INTF_WIDTH_128BITS:
                        mask = GENMASK(31, 5);
                        break;
                case TMC_MEM_INTF_WIDTH_256BITS:
                        mask = GENMASK(31, 6);
                        break;
                }

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained above.
                 */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                tmc_write_rrp(drvdata, read_ptr);
                lost = true;
        }

        if (lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        cur = buf->cur;
        offset = buf->offset;
        barrier = barrier_pkt;

        /* for every 32-bit word to read */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                if (lost && *barrier) {
                        *buf_ptr = *barrier;
                        barrier++;
                }

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode all we have to do is communicate to
         * perf_aux_output_end() the address of the current head.  In full
         * trace mode the same function expects a size to move rb->aux_head
         * forward.
         */
        if (buf->snapshot)
                local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
        else
                local_add(to_read, &buf->data_size);

        CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
        .enable         = tmc_enable_etf_sink,
        .disable        = tmc_disable_etf_sink,
        .alloc_buffer   = tmc_alloc_etf_buffer,
        .free_buffer    = tmc_free_etf_buffer,
        .set_buffer     = tmc_set_etf_buffer,
        .reset_buffer   = tmc_reset_etf_buffer,
        .update_buffer  = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
        .enable         = tmc_enable_etf_link,
        .disable        = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
        .link_ops       = &tmc_etf_link_ops,
};

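/*
 * tmc_read_prepare_etb() and tmc_read_unprepare_etb() bracket a userspace
 * read of the captured trace: the TMC core (coresight-tmc.c) calls them
 * when the character device (/dev/xyz.{etf|etb}) is opened and released,
 * so the buffer cannot be freed or re-armed while a read is in flight.
 */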
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                ret = -EINVAL;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EINVAL;
        }

        /* Re-enable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer.  As such zero-out the buffer so that we don't end
                 * up with stale data.
                 *
                 * Since the tracer is still enabled drvdata::buf
                 * can't be NULL.
                 */
                memset(drvdata->buf, 0, drvdata->size);
                tmc_etb_enable_hw(drvdata);
        } else {
                /*
                 * The ETB/ETF is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /*
         * Free allocated memory outside of the spinlock.  There is no need
         * to assert the validity of 'buf' since calling kfree(NULL) is safe.
         */
        kfree(buf);

        return 0;
}