// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */
9 #include <linux/kernel.h>
10 #include <linux/init.h>
11 #include <linux/types.h>
12 #include <linux/device.h>
14 #include <linux/err.h>
16 #include <linux/miscdevice.h>
17 #include <linux/uaccess.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/seq_file.h>
22 #include <linux/coresight.h>
23 #include <linux/amba/bus.h>
24 #include <linux/clk.h>
25 #include <linux/circ_buf.h>
27 #include <linux/perf_event.h>
30 #include "coresight-priv.h"
/* ETB management register offsets */
#define ETB_RAM_DEPTH_REG	0x004
#define ETB_STATUS_REG		0x00c
#define ETB_RAM_READ_DATA_REG	0x010
#define ETB_RAM_READ_POINTER	0x014
#define ETB_RAM_WRITE_POINTER	0x018
/* ETB_TRG is referenced by etb_enable_hw() and the mgmt attrs below;
 * the trigger counter register sits between the write pointer and CTL. */
#define ETB_TRG			0x01c
#define ETB_CTL_REG		0x020
#define ETB_RWD_REG		0x024
#define ETB_FFSR		0x300
#define ETB_FFCR		0x304
#define ETB_ITMISCOP0		0xee0
#define ETB_ITTRFLINACK		0xee4
#define ETB_ITTRFLIN		0xee8
#define ETB_ITATBDATA0		0xeeC
#define ETB_ITATBCTR2		0xef0
#define ETB_ITATBCTR1		0xef4
#define ETB_ITATBCTR0		0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL	BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN		BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC		BIT(0)
#define ETB_FFCR_FON_MAN	BIT(6)
#define ETB_FFCR_STOP_FI	BIT(12)
#define ETB_FFCR_STOP_TRIGGER	BIT(13)

#define ETB_FFCR_BIT		6
#define ETB_FFSR_BIT		1
#define ETB_FRAME_SIZE_WORDS	4
66 * struct etb_drvdata - specifics associated to an ETB component
67 * @base: memory mapped base address for this component.
68 * @dev: the device entity associated to this component.
69 * @atclk: optional clock for the core parts of the ETB.
70 * @csdev: component vitals needed by the framework.
71 * @miscdev: specifics to handle "/dev/xyz.etb" entry.
72 * @spinlock: only one at a time pls.
73 * @reading: synchronise user space access to etb buffer.
74 * @mode: this ETB is being used.
75 * @buf: area of memory where ETB buffer content gets sent.
76 * @buffer_depth: size of @buf.
77 * @trigger_cntr: amount of words to store after a trigger.
83 struct coresight_device
*csdev
;
84 struct miscdevice miscdev
;
93 static unsigned int etb_get_buffer_depth(struct etb_drvdata
*drvdata
)
97 pm_runtime_get_sync(drvdata
->dev
);
99 /* RO registers don't need locking */
100 depth
= readl_relaxed(drvdata
->base
+ ETB_RAM_DEPTH_REG
);
102 pm_runtime_put(drvdata
->dev
);
106 static void etb_enable_hw(struct etb_drvdata
*drvdata
)
111 CS_UNLOCK(drvdata
->base
);
113 depth
= drvdata
->buffer_depth
;
114 /* reset write RAM pointer address */
115 writel_relaxed(0x0, drvdata
->base
+ ETB_RAM_WRITE_POINTER
);
116 /* clear entire RAM buffer */
117 for (i
= 0; i
< depth
; i
++)
118 writel_relaxed(0x0, drvdata
->base
+ ETB_RWD_REG
);
120 /* reset write RAM pointer address */
121 writel_relaxed(0x0, drvdata
->base
+ ETB_RAM_WRITE_POINTER
);
122 /* reset read RAM pointer address */
123 writel_relaxed(0x0, drvdata
->base
+ ETB_RAM_READ_POINTER
);
125 writel_relaxed(drvdata
->trigger_cntr
, drvdata
->base
+ ETB_TRG
);
126 writel_relaxed(ETB_FFCR_EN_FTC
| ETB_FFCR_STOP_TRIGGER
,
127 drvdata
->base
+ ETB_FFCR
);
128 /* ETB trace capture enable */
129 writel_relaxed(ETB_CTL_CAPT_EN
, drvdata
->base
+ ETB_CTL_REG
);
131 CS_LOCK(drvdata
->base
);
134 static int etb_enable(struct coresight_device
*csdev
, u32 mode
)
138 struct etb_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
140 val
= local_cmpxchg(&drvdata
->mode
,
141 CS_MODE_DISABLED
, mode
);
143 * When accessing from Perf, a HW buffer can be handled
144 * by a single trace entity. In sysFS mode many tracers
145 * can be logging to the same HW buffer.
147 if (val
== CS_MODE_PERF
)
150 /* Nothing to do, the tracer is already enabled. */
151 if (val
== CS_MODE_SYSFS
)
154 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
155 etb_enable_hw(drvdata
);
156 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
159 dev_info(drvdata
->dev
, "ETB enabled\n");
163 static void etb_disable_hw(struct etb_drvdata
*drvdata
)
167 CS_UNLOCK(drvdata
->base
);
169 ffcr
= readl_relaxed(drvdata
->base
+ ETB_FFCR
);
170 /* stop formatter when a stop has completed */
171 ffcr
|= ETB_FFCR_STOP_FI
;
172 writel_relaxed(ffcr
, drvdata
->base
+ ETB_FFCR
);
173 /* manually generate a flush of the system */
174 ffcr
|= ETB_FFCR_FON_MAN
;
175 writel_relaxed(ffcr
, drvdata
->base
+ ETB_FFCR
);
177 if (coresight_timeout(drvdata
->base
, ETB_FFCR
, ETB_FFCR_BIT
, 0)) {
178 dev_err(drvdata
->dev
,
179 "timeout while waiting for completion of Manual Flush\n");
182 /* disable trace capture */
183 writel_relaxed(0x0, drvdata
->base
+ ETB_CTL_REG
);
185 if (coresight_timeout(drvdata
->base
, ETB_FFSR
, ETB_FFSR_BIT
, 1)) {
186 dev_err(drvdata
->dev
,
187 "timeout while waiting for Formatter to Stop\n");
190 CS_LOCK(drvdata
->base
);
193 static void etb_dump_hw(struct etb_drvdata
*drvdata
)
199 u32 read_data
, depth
;
200 u32 read_ptr
, write_ptr
;
201 u32 frame_off
, frame_endoff
;
203 CS_UNLOCK(drvdata
->base
);
205 read_ptr
= readl_relaxed(drvdata
->base
+ ETB_RAM_READ_POINTER
);
206 write_ptr
= readl_relaxed(drvdata
->base
+ ETB_RAM_WRITE_POINTER
);
208 frame_off
= write_ptr
% ETB_FRAME_SIZE_WORDS
;
209 frame_endoff
= ETB_FRAME_SIZE_WORDS
- frame_off
;
211 dev_err(drvdata
->dev
,
212 "write_ptr: %lu not aligned to formatter frame size\n",
213 (unsigned long)write_ptr
);
214 dev_err(drvdata
->dev
, "frameoff: %lu, frame_endoff: %lu\n",
215 (unsigned long)frame_off
, (unsigned long)frame_endoff
);
216 write_ptr
+= frame_endoff
;
219 if ((readl_relaxed(drvdata
->base
+ ETB_STATUS_REG
)
220 & ETB_STATUS_RAM_FULL
) == 0) {
221 writel_relaxed(0x0, drvdata
->base
+ ETB_RAM_READ_POINTER
);
223 writel_relaxed(write_ptr
, drvdata
->base
+ ETB_RAM_READ_POINTER
);
227 depth
= drvdata
->buffer_depth
;
228 buf_ptr
= drvdata
->buf
;
229 barrier
= barrier_pkt
;
230 for (i
= 0; i
< depth
; i
++) {
231 read_data
= readl_relaxed(drvdata
->base
+
232 ETB_RAM_READ_DATA_REG
);
233 if (lost
&& *barrier
) {
234 read_data
= *barrier
;
238 *(u32
*)buf_ptr
= read_data
;
243 buf_ptr
-= (frame_endoff
* 4);
244 for (i
= 0; i
< frame_endoff
; i
++) {
252 writel_relaxed(read_ptr
, drvdata
->base
+ ETB_RAM_READ_POINTER
);
254 CS_LOCK(drvdata
->base
);
257 static void etb_disable(struct coresight_device
*csdev
)
259 struct etb_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
262 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
263 etb_disable_hw(drvdata
);
264 etb_dump_hw(drvdata
);
265 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
267 local_set(&drvdata
->mode
, CS_MODE_DISABLED
);
269 dev_info(drvdata
->dev
, "ETB disabled\n");
272 static void *etb_alloc_buffer(struct coresight_device
*csdev
, int cpu
,
273 void **pages
, int nr_pages
, bool overwrite
)
276 struct cs_buffers
*buf
;
279 cpu
= smp_processor_id();
280 node
= cpu_to_node(cpu
);
282 buf
= kzalloc_node(sizeof(struct cs_buffers
), GFP_KERNEL
, node
);
286 buf
->snapshot
= overwrite
;
287 buf
->nr_pages
= nr_pages
;
288 buf
->data_pages
= pages
;
/* Release the cs_buffers structure allocated by etb_alloc_buffer(). */
static void etb_free_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}
300 static int etb_set_buffer(struct coresight_device
*csdev
,
301 struct perf_output_handle
*handle
,
306 struct cs_buffers
*buf
= sink_config
;
308 /* wrap head around to the amount of space we have */
309 head
= handle
->head
& ((buf
->nr_pages
<< PAGE_SHIFT
) - 1);
311 /* find the page to write to */
312 buf
->cur
= head
/ PAGE_SIZE
;
314 /* and offset within that page */
315 buf
->offset
= head
% PAGE_SIZE
;
317 local_set(&buf
->data_size
, 0);
322 static unsigned long etb_reset_buffer(struct coresight_device
*csdev
,
323 struct perf_output_handle
*handle
,
326 unsigned long size
= 0;
327 struct cs_buffers
*buf
= sink_config
;
331 * In snapshot mode ->data_size holds the new address of the
332 * ring buffer's head. The size itself is the whole address
333 * range since we want the latest information.
336 handle
->head
= local_xchg(&buf
->data_size
,
337 buf
->nr_pages
<< PAGE_SHIFT
);
340 * Tell the tracer PMU how much we got in this run and if
341 * something went wrong along the way. Nobody else can use
342 * this cs_buffers instance until we are done. As such
343 * resetting parameters here and squaring off with the ring
344 * buffer API in the tracer PMU is fine.
346 size
= local_xchg(&buf
->data_size
, 0);
352 static void etb_update_buffer(struct coresight_device
*csdev
,
353 struct perf_output_handle
*handle
,
360 u32 read_ptr
, write_ptr
, capacity
;
361 u32 status
, read_data
, to_read
;
362 unsigned long offset
;
363 struct cs_buffers
*buf
= sink_config
;
364 struct etb_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
369 capacity
= drvdata
->buffer_depth
* ETB_FRAME_SIZE_WORDS
;
371 etb_disable_hw(drvdata
);
372 CS_UNLOCK(drvdata
->base
);
374 /* unit is in words, not bytes */
375 read_ptr
= readl_relaxed(drvdata
->base
+ ETB_RAM_READ_POINTER
);
376 write_ptr
= readl_relaxed(drvdata
->base
+ ETB_RAM_WRITE_POINTER
);
379 * Entries should be aligned to the frame size. If they are not
380 * go back to the last alignment point to give decoding tools a
381 * chance to fix things.
383 if (write_ptr
% ETB_FRAME_SIZE_WORDS
) {
384 dev_err(drvdata
->dev
,
385 "write_ptr: %lu not aligned to formatter frame size\n",
386 (unsigned long)write_ptr
);
388 write_ptr
&= ~(ETB_FRAME_SIZE_WORDS
- 1);
393 * Get a hold of the status register and see if a wrap around
394 * has occurred. If so adjust things accordingly. Otherwise
395 * start at the beginning and go until the write pointer has
398 status
= readl_relaxed(drvdata
->base
+ ETB_STATUS_REG
);
399 if (status
& ETB_STATUS_RAM_FULL
) {
402 read_ptr
= write_ptr
;
404 to_read
= CIRC_CNT(write_ptr
, read_ptr
, drvdata
->buffer_depth
);
405 to_read
*= ETB_FRAME_SIZE_WORDS
;
409 * Make sure we don't overwrite data that hasn't been consumed yet.
410 * It is entirely possible that the HW buffer has more data than the
411 * ring buffer can currently handle. If so adjust the start address
412 * to take only the last traces.
414 * In snapshot mode we are looking to get the latest traces only and as
415 * such, we don't care about not overwriting data that hasn't been
416 * processed by user space.
418 if (!buf
->snapshot
&& to_read
> handle
->size
) {
419 u32 mask
= ~(ETB_FRAME_SIZE_WORDS
- 1);
421 /* The new read pointer must be frame size aligned */
422 to_read
= handle
->size
& mask
;
424 * Move the RAM read pointer up, keeping in mind that
425 * everything is in frame size units.
427 read_ptr
= (write_ptr
+ drvdata
->buffer_depth
) -
428 to_read
/ ETB_FRAME_SIZE_WORDS
;
429 /* Wrap around if need be*/
430 if (read_ptr
> (drvdata
->buffer_depth
- 1))
431 read_ptr
-= drvdata
->buffer_depth
;
432 /* let the decoder know we've skipped ahead */
437 perf_aux_output_flag(handle
, PERF_AUX_FLAG_TRUNCATED
);
439 /* finally tell HW where we want to start reading from */
440 writel_relaxed(read_ptr
, drvdata
->base
+ ETB_RAM_READ_POINTER
);
443 offset
= buf
->offset
;
444 barrier
= barrier_pkt
;
446 for (i
= 0; i
< to_read
; i
+= 4) {
447 buf_ptr
= buf
->data_pages
[cur
] + offset
;
448 read_data
= readl_relaxed(drvdata
->base
+
449 ETB_RAM_READ_DATA_REG
);
450 if (lost
&& *barrier
) {
451 read_data
= *barrier
;
455 *(u32
*)buf_ptr
= read_data
;
459 if (offset
>= PAGE_SIZE
) {
462 /* wrap around at the end of the buffer */
463 cur
&= buf
->nr_pages
- 1;
467 /* reset ETB buffer for next run */
468 writel_relaxed(0x0, drvdata
->base
+ ETB_RAM_READ_POINTER
);
469 writel_relaxed(0x0, drvdata
->base
+ ETB_RAM_WRITE_POINTER
);
472 * In snapshot mode all we have to do is communicate to
473 * perf_aux_output_end() the address of the current head. In full
474 * trace mode the same function expects a size to move rb->aux_head
478 local_set(&buf
->data_size
, (cur
* PAGE_SIZE
) + offset
);
480 local_add(to_read
, &buf
->data_size
);
482 etb_enable_hw(drvdata
);
483 CS_LOCK(drvdata
->base
);
486 static const struct coresight_ops_sink etb_sink_ops
= {
487 .enable
= etb_enable
,
488 .disable
= etb_disable
,
489 .alloc_buffer
= etb_alloc_buffer
,
490 .free_buffer
= etb_free_buffer
,
491 .set_buffer
= etb_set_buffer
,
492 .reset_buffer
= etb_reset_buffer
,
493 .update_buffer
= etb_update_buffer
,
496 static const struct coresight_ops etb_cs_ops
= {
497 .sink_ops
= &etb_sink_ops
,
500 static void etb_dump(struct etb_drvdata
*drvdata
)
504 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
505 if (local_read(&drvdata
->mode
) == CS_MODE_SYSFS
) {
506 etb_disable_hw(drvdata
);
507 etb_dump_hw(drvdata
);
508 etb_enable_hw(drvdata
);
510 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
512 dev_info(drvdata
->dev
, "ETB dumped\n");
515 static int etb_open(struct inode
*inode
, struct file
*file
)
517 struct etb_drvdata
*drvdata
= container_of(file
->private_data
,
518 struct etb_drvdata
, miscdev
);
520 if (local_cmpxchg(&drvdata
->reading
, 0, 1))
523 dev_dbg(drvdata
->dev
, "%s: successfully opened\n", __func__
);
527 static ssize_t
etb_read(struct file
*file
, char __user
*data
,
528 size_t len
, loff_t
*ppos
)
531 struct etb_drvdata
*drvdata
= container_of(file
->private_data
,
532 struct etb_drvdata
, miscdev
);
536 depth
= drvdata
->buffer_depth
;
537 if (*ppos
+ len
> depth
* 4)
538 len
= depth
* 4 - *ppos
;
540 if (copy_to_user(data
, drvdata
->buf
+ *ppos
, len
)) {
541 dev_dbg(drvdata
->dev
, "%s: copy_to_user failed\n", __func__
);
547 dev_dbg(drvdata
->dev
, "%s: %zu bytes copied, %d bytes left\n",
548 __func__
, len
, (int)(depth
* 4 - *ppos
));
552 static int etb_release(struct inode
*inode
, struct file
*file
)
554 struct etb_drvdata
*drvdata
= container_of(file
->private_data
,
555 struct etb_drvdata
, miscdev
);
556 local_set(&drvdata
->reading
, 0);
558 dev_dbg(drvdata
->dev
, "%s: released\n", __func__
);
562 static const struct file_operations etb_fops
= {
563 .owner
= THIS_MODULE
,
566 .release
= etb_release
,
570 #define coresight_etb10_reg(name, offset) \
571 coresight_simple_reg32(struct etb_drvdata, name, offset)
573 coresight_etb10_reg(rdp
, ETB_RAM_DEPTH_REG
);
574 coresight_etb10_reg(sts
, ETB_STATUS_REG
);
575 coresight_etb10_reg(rrp
, ETB_RAM_READ_POINTER
);
576 coresight_etb10_reg(rwp
, ETB_RAM_WRITE_POINTER
);
577 coresight_etb10_reg(trg
, ETB_TRG
);
578 coresight_etb10_reg(ctl
, ETB_CTL_REG
);
579 coresight_etb10_reg(ffsr
, ETB_FFSR
);
580 coresight_etb10_reg(ffcr
, ETB_FFCR
);
582 static struct attribute
*coresight_etb_mgmt_attrs
[] = {
594 static ssize_t
trigger_cntr_show(struct device
*dev
,
595 struct device_attribute
*attr
, char *buf
)
597 struct etb_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
598 unsigned long val
= drvdata
->trigger_cntr
;
600 return sprintf(buf
, "%#lx\n", val
);
603 static ssize_t
trigger_cntr_store(struct device
*dev
,
604 struct device_attribute
*attr
,
605 const char *buf
, size_t size
)
609 struct etb_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
611 ret
= kstrtoul(buf
, 16, &val
);
615 drvdata
->trigger_cntr
= val
;
618 static DEVICE_ATTR_RW(trigger_cntr
);
620 static struct attribute
*coresight_etb_attrs
[] = {
621 &dev_attr_trigger_cntr
.attr
,
625 static const struct attribute_group coresight_etb_group
= {
626 .attrs
= coresight_etb_attrs
,
629 static const struct attribute_group coresight_etb_mgmt_group
= {
630 .attrs
= coresight_etb_mgmt_attrs
,
634 const struct attribute_group
*coresight_etb_groups
[] = {
635 &coresight_etb_group
,
636 &coresight_etb_mgmt_group
,
640 static int etb_probe(struct amba_device
*adev
, const struct amba_id
*id
)
644 struct device
*dev
= &adev
->dev
;
645 struct coresight_platform_data
*pdata
= NULL
;
646 struct etb_drvdata
*drvdata
;
647 struct resource
*res
= &adev
->res
;
648 struct coresight_desc desc
= { 0 };
649 struct device_node
*np
= adev
->dev
.of_node
;
652 pdata
= of_get_coresight_platform_data(dev
, np
);
654 return PTR_ERR(pdata
);
655 adev
->dev
.platform_data
= pdata
;
658 drvdata
= devm_kzalloc(dev
, sizeof(*drvdata
), GFP_KERNEL
);
662 drvdata
->dev
= &adev
->dev
;
663 drvdata
->atclk
= devm_clk_get(&adev
->dev
, "atclk"); /* optional */
664 if (!IS_ERR(drvdata
->atclk
)) {
665 ret
= clk_prepare_enable(drvdata
->atclk
);
669 dev_set_drvdata(dev
, drvdata
);
671 /* validity for the resource is already checked by the AMBA core */
672 base
= devm_ioremap_resource(dev
, res
);
674 return PTR_ERR(base
);
676 drvdata
->base
= base
;
678 spin_lock_init(&drvdata
->spinlock
);
680 drvdata
->buffer_depth
= etb_get_buffer_depth(drvdata
);
681 pm_runtime_put(&adev
->dev
);
683 if (drvdata
->buffer_depth
& 0x80000000)
686 drvdata
->buf
= devm_kcalloc(dev
,
687 drvdata
->buffer_depth
, 4, GFP_KERNEL
);
691 desc
.type
= CORESIGHT_DEV_TYPE_SINK
;
692 desc
.subtype
.sink_subtype
= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER
;
693 desc
.ops
= &etb_cs_ops
;
696 desc
.groups
= coresight_etb_groups
;
697 drvdata
->csdev
= coresight_register(&desc
);
698 if (IS_ERR(drvdata
->csdev
))
699 return PTR_ERR(drvdata
->csdev
);
701 drvdata
->miscdev
.name
= pdata
->name
;
702 drvdata
->miscdev
.minor
= MISC_DYNAMIC_MINOR
;
703 drvdata
->miscdev
.fops
= &etb_fops
;
704 ret
= misc_register(&drvdata
->miscdev
);
706 goto err_misc_register
;
711 coresight_unregister(drvdata
->csdev
);
716 static int etb_runtime_suspend(struct device
*dev
)
718 struct etb_drvdata
*drvdata
= dev_get_drvdata(dev
);
720 if (drvdata
&& !IS_ERR(drvdata
->atclk
))
721 clk_disable_unprepare(drvdata
->atclk
);
726 static int etb_runtime_resume(struct device
*dev
)
728 struct etb_drvdata
*drvdata
= dev_get_drvdata(dev
);
730 if (drvdata
&& !IS_ERR(drvdata
->atclk
))
731 clk_prepare_enable(drvdata
->atclk
);
737 static const struct dev_pm_ops etb_dev_pm_ops
= {
738 SET_RUNTIME_PM_OPS(etb_runtime_suspend
, etb_runtime_resume
, NULL
)
741 static const struct amba_id etb_ids
[] = {
749 static struct amba_driver etb_driver
= {
751 .name
= "coresight-etb10",
752 .owner
= THIS_MODULE
,
753 .pm
= &etb_dev_pm_ops
,
754 .suppress_bind_attrs
= true,
760 builtin_amba_driver(etb_driver
);