// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#include "coresight-etm-perf.h"
#define ETB_RAM_DEPTH_REG	0x004
#define ETB_STATUS_REG		0x00c
#define ETB_RAM_READ_DATA_REG	0x010
#define ETB_RAM_READ_POINTER	0x014
#define ETB_RAM_WRITE_POINTER	0x018
#define ETB_TRG			0x01c
#define ETB_CTL_REG		0x020
#define ETB_RWD_REG		0x024
#define ETB_FFSR		0x300
#define ETB_FFCR		0x304
#define ETB_ITMISCOP0		0xee0
#define ETB_ITTRFLINACK		0xee4
#define ETB_ITTRFLIN		0xee8
#define ETB_ITATBDATA0		0xeeC
#define ETB_ITATBCTR2		0xef0
#define ETB_ITATBCTR1		0xef4
#define ETB_ITATBCTR0		0xef8
/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL	BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN		BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC		BIT(0)
#define ETB_FFCR_FON_MAN	BIT(6)
#define ETB_FFCR_STOP_FI	BIT(12)
#define ETB_FFCR_STOP_TRIGGER	BIT(13)

#define ETB_FFCR_BIT		6
#define ETB_FFSR_BIT		1
#define ETB_FRAME_SIZE_WORDS	4
DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");
/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:	memory mapped base address for this component.
 * @atclk:	optional clock for the core parts of the ETB.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:	serialise access to the ETB hardware and driver state.
 * @reading:	synchronise user space access to etb buffer.
 * @pid:	Process ID of the process being monitored by the session
 *		that is using this component.
 * @buf:	area of memory where ETB buffer content gets sent.
 * @mode:	how this ETB is being used, perf mode or sysfs mode.
 * @buffer_depth: size of @buf.
 * @trigger_cntr: number of words to store after a trigger.
 */
struct etb_drvdata {
	void __iomem		*base;
	struct clk		*atclk;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	local_t			reading;
	pid_t			pid;
	u8			*buf;
	u32			mode;
	u32			buffer_depth;
	u32			trigger_cntr;
};
static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle);
static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
	return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}
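/*
 * Program the ETB for a capture session: reset the RAM write pointer, clear
 * the trace RAM, reset both RAM pointers, set the trigger counter, configure
 * the formatter and finally enable trace capture.
 */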
static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
	int i;
	u32 depth;

	CS_UNLOCK(drvdata->base);

	depth = drvdata->buffer_depth;
	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* clear entire RAM buffer */
	for (i = 0; i < depth; i++)
		writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* reset read RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
	writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
		       drvdata->base + ETB_FFCR);
	/* ETB trace capture enable */
	writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

	CS_LOCK(drvdata->base);
}
static int etb_enable_hw(struct etb_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);

	if (rc)
		return rc;

	__etb_enable_hw(drvdata);

	return 0;
}
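/*
 * Enable the ETB from the sysFS interface.  The sink is shared: refuse if a
 * perf session owns it, program the hardware only for the first user and
 * take a reference for every enable call.
 */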
static int etb_enable_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't mess up with perf sessions. */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	if (drvdata->mode == CS_MODE_DISABLED) {
		ret = etb_enable_hw(drvdata);
		if (ret)
			goto out;

		drvdata->mode = CS_MODE_SYSFS;
	}

	atomic_inc(csdev->refcnt);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}
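/*
 * Enable the ETB for a perf session.  A sink can only be used by one
 * monitored process at a time; enable calls from the same session simply
 * take an extra reference on the device.
 */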
static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* No need to continue if the component is already in use by sysFS. */
	if (drvdata->mode == CS_MODE_SYSFS) {
		ret = -EBUSY;
		goto out;
	}

	/* Get a handle on the pid of the process to monitor */
	pid = buf->pid;

	if (drvdata->pid != -1 && drvdata->pid != pid) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * No HW configuration is needed if the sink is already in
	 * use for this session.
	 */
	if (drvdata->pid == pid) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * We don't have an internal state to clean up if we fail to set up
	 * the perf buffer. So we can perform the step before we turn the
	 * ETB on and leave without cleaning up.
	 */
	ret = etb_set_buffer(csdev, handle);
	if (ret)
		goto out;

	ret = etb_enable_hw(drvdata);
	if (!ret) {
		/* Associate with monitored process. */
		drvdata->pid = pid;
		drvdata->mode = CS_MODE_PERF;
		atomic_inc(csdev->refcnt);
	}

out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}
static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etb_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etb_enable_perf(csdev, data);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "ETB enabled\n");
	return 0;
}
static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
	u32 ffcr;
	struct device *dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
	/* stop formatter when a stop has completed */
	ffcr |= ETB_FFCR_STOP_FI;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
	/* manually generate a flush of the system */
	ffcr |= ETB_FFCR_FON_MAN;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

	if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
		dev_err(dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	/* disable trace capture */
	writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

	if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
		dev_err(dev,
			"timeout while waiting for Formatter to Stop\n");
	}

	CS_LOCK(drvdata->base);
}
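/*
 * Drain the ETB trace RAM into drvdata->buf.  If the RAM wrapped around, a
 * barrier packet is inserted so decoders know data was lost, and any partial
 * formatter frame at the end of the capture is zeroed out.
 */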
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
	bool lost = false;
	int i;
	u8 *buf_ptr;
	u32 read_data, depth;
	u32 read_ptr, write_ptr;
	u32 frame_off, frame_endoff;
	struct device *dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
	frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
	if (frame_off) {
		dev_err(dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);
		dev_err(dev, "frameoff: %lu, frame_endoff: %lu\n",
			(unsigned long)frame_off, (unsigned long)frame_endoff);
		write_ptr += frame_endoff;
	}

	if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
	     & ETB_STATUS_RAM_FULL) == 0) {
		writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	} else {
		writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
		lost = true;
	}

	depth = drvdata->buffer_depth;
	buf_ptr = drvdata->buf;
	for (i = 0; i < depth; i++) {
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);

	if (frame_off) {
		buf_ptr -= (frame_endoff * 4);
		for (i = 0; i < frame_endoff; i++) {
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
		}
	}

	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	CS_LOCK(drvdata->base);
}
static void etb_disable_hw(struct etb_drvdata *drvdata)
{
	__etb_disable_hw(drvdata);
	etb_dump_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
}
static int etb_disable(struct coresight_device *csdev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "ETB disabled\n");
	return 0;
}
static void *etb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}
static void etb_free_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}
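/* Point the internal cs_buffers state at the current perf ring buffer head. */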
static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}
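/*
 * Copy the hardware buffer content to the perf ring buffer: stop the ETB,
 * work out how much data is available (and whether a wrap-around occurred)
 * from the RAM read/write pointers, copy the data out and re-arm the ETB
 * for the next run.
 */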
static unsigned long etb_update_buffer(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config)
{
	bool lost = false;
	int i, cur;
	u8 *buf_ptr;
	const u32 *barrier;
	u32 read_ptr, write_ptr, capacity;
	u32 status, read_data;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	__etb_disable_hw(drvdata);
	CS_UNLOCK(drvdata->base);

	/* unit is in words, not bytes */
	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * Entries should be aligned to the frame size.  If they are not
	 * go back to the last alignment point to give decoding tools a
	 * chance to fix things.
	 */
	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
		dev_err(&csdev->dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);

		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
		lost = true;
	}

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.  Otherwise
	 * start at the beginning and go until the write pointer has
	 * been reached.
	 */
	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
	if (status & ETB_STATUS_RAM_FULL) {
		lost = true;
		to_read = capacity;
		read_ptr = write_ptr;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
		to_read *= ETB_FRAME_SIZE_WORDS;
	}

	/*
	 * Make sure we don't overwrite data that hasn't been consumed yet.
	 * It is entirely possible that the HW buffer has more data than the
	 * ring buffer can currently handle.  If so adjust the start address
	 * to take only the last traces.
	 *
	 * In snapshot mode we are looking to get the latest traces only and as
	 * such, we don't care about not overwriting data that hasn't been
	 * processed by user space.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

		/* The new read pointer must be frame size aligned */
		to_read = handle->size & mask;
		/*
		 * Move the RAM read pointer up, keeping in mind that
		 * everything is in frame size units.
		 */
		read_ptr = (write_ptr + drvdata->buffer_depth) -
			   to_read / ETB_FRAME_SIZE_WORDS;
		/* Wrap around if need be */
		if (read_ptr > (drvdata->buffer_depth - 1))
			read_ptr -= drvdata->buffer_depth;
		/* let the decoder know we've skipped ahead */
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	/* finally tell HW where we want to start reading from */
	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			read_data = *barrier;
			barrier++;
		}

		*(u32 *)buf_ptr = read_data;
		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* reset ETB buffer for next run */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written.  User space function cs_etm_find_snapshot() will
	 * figure out how many bytes to get from the AUX buffer based on the
	 * position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	__etb_enable_hw(drvdata);
	CS_LOCK(drvdata->base);

out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}
static const struct coresight_ops_sink etb_sink_ops = {
	.enable		= etb_enable,
	.disable	= etb_disable,
	.alloc_buffer	= etb_alloc_buffer,
	.free_buffer	= etb_free_buffer,
	.update_buffer	= etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
	.sink_ops	= &etb_sink_ops,
};
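/* Dump the hardware buffer to drvdata->buf for the sysFS/read() interface. */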
static void etb_dump(struct etb_drvdata *drvdata)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_SYSFS) {
		__etb_disable_hw(drvdata);
		etb_dump_hw(drvdata);
		__etb_enable_hw(drvdata);
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}
static int etb_open(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);

	if (local_cmpxchg(&drvdata->reading, 0, 1))
		return -EBUSY;

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}
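/* Dump the ETB RAM and copy the requested slice of drvdata->buf to user space. */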
static ssize_t etb_read(struct file *file, char __user *data,
			size_t len, loff_t *ppos)
{
	u32 depth;
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	struct device *dev = &drvdata->csdev->dev;

	etb_dump(drvdata);

	depth = drvdata->buffer_depth;
	if (*ppos + len > depth * 4)
		len = depth * 4 - *ppos;

	if (copy_to_user(data, drvdata->buf + *ppos, len)) {
		dev_dbg(dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(depth * 4 - *ppos));

	return len;
}
static int etb_release(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	local_set(&drvdata->reading, 0);

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}
static const struct file_operations etb_fops = {
	.owner		= THIS_MODULE,
	.open		= etb_open,
	.read		= etb_read,
	.release	= etb_release,
	.llseek		= no_llseek,
};
#define coresight_etb10_reg(name, offset)			\
	coresight_simple_reg32(struct etb_drvdata, name, offset)
coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);
static struct attribute *coresight_etb_mgmt_attrs[] = {
	&dev_attr_rdp.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	NULL,
};
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}
static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);
static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	NULL,
};
static const struct attribute_group coresight_etb_group = {
	.attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
	.attrs = coresight_etb_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_etb_groups[] = {
	&coresight_etb_group,
	&coresight_etb_mgmt_group,
	NULL,
};
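/*
 * Probe: map the ETB registers, size and allocate the dump buffer, then
 * register the device both as a coresight sink and as a misc character
 * device for direct buffer reads.
 */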
static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etb_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };

	desc.name = coresight_alloc_device_name(&etb_devs, dev);
	if (!desc.name)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}
	dev_set_drvdata(dev, drvdata);

	/* validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->buffer_depth = etb_get_buffer_depth(drvdata);

	if (drvdata->buffer_depth & 0x80000000)
		return -EINVAL;

	drvdata->buf = devm_kcalloc(dev,
				    drvdata->buffer_depth, 4, GFP_KERNEL);
	if (!drvdata->buf)
		return -ENOMEM;

	/* This device is not associated with a session */
	drvdata->pid = -1;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);
	adev->dev.platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &etb_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etb_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &etb_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	pm_runtime_put(&adev->dev);
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
	return ret;
}
static int etb_remove(struct amba_device *adev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops, which is
	 * etb fops in this case, the device stays around until the last
	 * file handle to it is closed.
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);

	return 0;
}
#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}
*dev
)
834 struct etb_drvdata
*drvdata
= dev_get_drvdata(dev
);
836 if (drvdata
&& !IS_ERR(drvdata
->atclk
))
837 clk_prepare_enable(drvdata
->atclk
);
static const struct dev_pm_ops etb_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};
static const struct amba_id etb_ids[] = {
	{
		.id	= 0x000bb907,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, etb_ids);
static struct amba_driver etb_driver = {
	.drv = {
		.name	= "coresight-etb10",
		.owner	= THIS_MODULE,
		.pm	= &etb_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etb_probe,
	.remove		= etb_remove,
	.id_table	= etb_ids,
};
module_amba_driver(etb_driver);
MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight Embedded Trace Buffer driver");
MODULE_LICENSE("GPL v2");