/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13 #include <linux/kernel.h>
14 #include <linux/moduleparam.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
20 #include <linux/err.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
24 #include <linux/smp.h>
25 #include <linux/sysfs.h>
26 #include <linux/stat.h>
27 #include <linux/clk.h>
28 #include <linux/cpu.h>
29 #include <linux/coresight.h>
30 #include <linux/pm_wakeup.h>
31 #include <linux/amba/bus.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/pm_runtime.h>
35 #include <asm/sections.h>
37 #include "coresight-etm4x.h"
39 static int boot_enable
;
40 module_param_named(boot_enable
, boot_enable
, int, S_IRUGO
);
42 /* The number of ETMv4 currently registered */
43 static int etm4_count
;
44 static struct etmv4_drvdata
*etmdrvdata
[NR_CPUS
];
46 static void etm4_os_unlock(void *info
)
48 struct etmv4_drvdata
*drvdata
= (struct etmv4_drvdata
*)info
;
50 /* Writing any value to ETMOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata
->base
+ TRCOSLAR
);
55 static bool etm4_arch_supported(u8 arch
)
66 static int etm4_trace_id(struct coresight_device
*csdev
)
68 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
73 return drvdata
->trcid
;
75 pm_runtime_get_sync(drvdata
->dev
);
76 spin_lock_irqsave(&drvdata
->spinlock
, flags
);
78 CS_UNLOCK(drvdata
->base
);
79 trace_id
= readl_relaxed(drvdata
->base
+ TRCTRACEIDR
);
80 trace_id
&= ETM_TRACEID_MASK
;
81 CS_LOCK(drvdata
->base
);
83 spin_unlock_irqrestore(&drvdata
->spinlock
, flags
);
84 pm_runtime_put(drvdata
->dev
);
89 static void etm4_enable_hw(void *info
)
92 struct etmv4_drvdata
*drvdata
= info
;
94 CS_UNLOCK(drvdata
->base
);
96 etm4_os_unlock(drvdata
);
98 /* Disable the trace unit before programming trace registers */
99 writel_relaxed(0, drvdata
->base
+ TRCPRGCTLR
);
101 /* wait for TRCSTATR.IDLE to go up */
102 if (coresight_timeout(drvdata
->base
, TRCSTATR
, TRCSTATR_IDLE_BIT
, 1))
103 dev_err(drvdata
->dev
,
104 "timeout observed when probing at offset %#x\n",
107 writel_relaxed(drvdata
->pe_sel
, drvdata
->base
+ TRCPROCSELR
);
108 writel_relaxed(drvdata
->cfg
, drvdata
->base
+ TRCCONFIGR
);
109 /* nothing specific implemented */
110 writel_relaxed(0x0, drvdata
->base
+ TRCAUXCTLR
);
111 writel_relaxed(drvdata
->eventctrl0
, drvdata
->base
+ TRCEVENTCTL0R
);
112 writel_relaxed(drvdata
->eventctrl1
, drvdata
->base
+ TRCEVENTCTL1R
);
113 writel_relaxed(drvdata
->stall_ctrl
, drvdata
->base
+ TRCSTALLCTLR
);
114 writel_relaxed(drvdata
->ts_ctrl
, drvdata
->base
+ TRCTSCTLR
);
115 writel_relaxed(drvdata
->syncfreq
, drvdata
->base
+ TRCSYNCPR
);
116 writel_relaxed(drvdata
->ccctlr
, drvdata
->base
+ TRCCCCTLR
);
117 writel_relaxed(drvdata
->bb_ctrl
, drvdata
->base
+ TRCBBCTLR
);
118 writel_relaxed(drvdata
->trcid
, drvdata
->base
+ TRCTRACEIDR
);
119 writel_relaxed(drvdata
->vinst_ctrl
, drvdata
->base
+ TRCVICTLR
);
120 writel_relaxed(drvdata
->viiectlr
, drvdata
->base
+ TRCVIIECTLR
);
121 writel_relaxed(drvdata
->vissctlr
,
122 drvdata
->base
+ TRCVISSCTLR
);
123 writel_relaxed(drvdata
->vipcssctlr
,
124 drvdata
->base
+ TRCVIPCSSCTLR
);
125 for (i
= 0; i
< drvdata
->nrseqstate
- 1; i
++)
126 writel_relaxed(drvdata
->seq_ctrl
[i
],
127 drvdata
->base
+ TRCSEQEVRn(i
));
128 writel_relaxed(drvdata
->seq_rst
, drvdata
->base
+ TRCSEQRSTEVR
);
129 writel_relaxed(drvdata
->seq_state
, drvdata
->base
+ TRCSEQSTR
);
130 writel_relaxed(drvdata
->ext_inp
, drvdata
->base
+ TRCEXTINSELR
);
131 for (i
= 0; i
< drvdata
->nr_cntr
; i
++) {
132 writel_relaxed(drvdata
->cntrldvr
[i
],
133 drvdata
->base
+ TRCCNTRLDVRn(i
));
134 writel_relaxed(drvdata
->cntr_ctrl
[i
],
135 drvdata
->base
+ TRCCNTCTLRn(i
));
136 writel_relaxed(drvdata
->cntr_val
[i
],
137 drvdata
->base
+ TRCCNTVRn(i
));
140 /* Resource selector pair 0 is always implemented and reserved */
141 for (i
= 2; i
< drvdata
->nr_resource
* 2; i
++)
142 writel_relaxed(drvdata
->res_ctrl
[i
],
143 drvdata
->base
+ TRCRSCTLRn(i
));
145 for (i
= 0; i
< drvdata
->nr_ss_cmp
; i
++) {
146 writel_relaxed(drvdata
->ss_ctrl
[i
],
147 drvdata
->base
+ TRCSSCCRn(i
));
148 writel_relaxed(drvdata
->ss_status
[i
],
149 drvdata
->base
+ TRCSSCSRn(i
));
150 writel_relaxed(drvdata
->ss_pe_cmp
[i
],
151 drvdata
->base
+ TRCSSPCICRn(i
));
153 for (i
= 0; i
< drvdata
->nr_addr_cmp
; i
++) {
154 writeq_relaxed(drvdata
->addr_val
[i
],
155 drvdata
->base
+ TRCACVRn(i
));
156 writeq_relaxed(drvdata
->addr_acc
[i
],
157 drvdata
->base
+ TRCACATRn(i
));
159 for (i
= 0; i
< drvdata
->numcidc
; i
++)
160 writeq_relaxed(drvdata
->ctxid_pid
[i
],
161 drvdata
->base
+ TRCCIDCVRn(i
));
162 writel_relaxed(drvdata
->ctxid_mask0
, drvdata
->base
+ TRCCIDCCTLR0
);
163 writel_relaxed(drvdata
->ctxid_mask1
, drvdata
->base
+ TRCCIDCCTLR1
);
165 for (i
= 0; i
< drvdata
->numvmidc
; i
++)
166 writeq_relaxed(drvdata
->vmid_val
[i
],
167 drvdata
->base
+ TRCVMIDCVRn(i
));
168 writel_relaxed(drvdata
->vmid_mask0
, drvdata
->base
+ TRCVMIDCCTLR0
);
169 writel_relaxed(drvdata
->vmid_mask1
, drvdata
->base
+ TRCVMIDCCTLR1
);
171 /* Enable the trace unit */
172 writel_relaxed(1, drvdata
->base
+ TRCPRGCTLR
);
174 /* wait for TRCSTATR.IDLE to go back down to '0' */
175 if (coresight_timeout(drvdata
->base
, TRCSTATR
, TRCSTATR_IDLE_BIT
, 0))
176 dev_err(drvdata
->dev
,
177 "timeout observed when probing at offset %#x\n",
180 CS_LOCK(drvdata
->base
);
182 dev_dbg(drvdata
->dev
, "cpu: %d enable smp call done\n", drvdata
->cpu
);
185 static int etm4_enable(struct coresight_device
*csdev
)
187 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
190 pm_runtime_get_sync(drvdata
->dev
);
191 spin_lock(&drvdata
->spinlock
);
194 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
195 * ensures that register writes occur when cpu is powered.
197 ret
= smp_call_function_single(drvdata
->cpu
,
198 etm4_enable_hw
, drvdata
, 1);
201 drvdata
->enable
= true;
202 drvdata
->sticky_enable
= true;
204 spin_unlock(&drvdata
->spinlock
);
206 dev_info(drvdata
->dev
, "ETM tracing enabled\n");
209 spin_unlock(&drvdata
->spinlock
);
210 pm_runtime_put(drvdata
->dev
);
214 static void etm4_disable_hw(void *info
)
217 struct etmv4_drvdata
*drvdata
= info
;
219 CS_UNLOCK(drvdata
->base
);
221 control
= readl_relaxed(drvdata
->base
+ TRCPRGCTLR
);
223 /* EN, bit[0] Trace unit enable bit */
226 /* make sure everything completes before disabling */
229 writel_relaxed(control
, drvdata
->base
+ TRCPRGCTLR
);
231 CS_LOCK(drvdata
->base
);
233 dev_dbg(drvdata
->dev
, "cpu: %d disable smp call done\n", drvdata
->cpu
);
236 static void etm4_disable(struct coresight_device
*csdev
)
238 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(csdev
->dev
.parent
);
241 * Taking hotplug lock here protects from clocks getting disabled
242 * with tracing being left on (crash scenario) if user disable occurs
243 * after cpu online mask indicates the cpu is offline but before the
244 * DYING hotplug callback is serviced by the ETM driver.
247 spin_lock(&drvdata
->spinlock
);
250 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
251 * ensures that register writes occur when cpu is powered.
253 smp_call_function_single(drvdata
->cpu
, etm4_disable_hw
, drvdata
, 1);
254 drvdata
->enable
= false;
256 spin_unlock(&drvdata
->spinlock
);
259 pm_runtime_put(drvdata
->dev
);
261 dev_info(drvdata
->dev
, "ETM tracing disabled\n");
264 static const struct coresight_ops_source etm4_source_ops
= {
265 .trace_id
= etm4_trace_id
,
266 .enable
= etm4_enable
,
267 .disable
= etm4_disable
,
270 static const struct coresight_ops etm4_cs_ops
= {
271 .source_ops
= &etm4_source_ops
,
274 static int etm4_set_mode_exclude(struct etmv4_drvdata
*drvdata
, bool exclude
)
276 u8 idx
= drvdata
->addr_idx
;
279 * TRCACATRn.TYPE bit[1:0]: type of comparison
280 * the trace unit performs
282 if (BMVAL(drvdata
->addr_acc
[idx
], 0, 1) == ETM_INSTR_ADDR
) {
287 * We are performing instruction address comparison. Set the
288 * relevant bit of ViewInst Include/Exclude Control register
289 * for corresponding address comparator pair.
291 if (drvdata
->addr_type
[idx
] != ETM_ADDR_TYPE_RANGE
||
292 drvdata
->addr_type
[idx
+ 1] != ETM_ADDR_TYPE_RANGE
)
295 if (exclude
== true) {
297 * Set exclude bit and unset the include bit
298 * corresponding to comparator pair
300 drvdata
->viiectlr
|= BIT(idx
/ 2 + 16);
301 drvdata
->viiectlr
&= ~BIT(idx
/ 2);
304 * Set include bit and unset exclude bit
305 * corresponding to comparator pair
307 drvdata
->viiectlr
|= BIT(idx
/ 2);
308 drvdata
->viiectlr
&= ~BIT(idx
/ 2 + 16);
314 static ssize_t
nr_pe_cmp_show(struct device
*dev
,
315 struct device_attribute
*attr
,
319 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
321 val
= drvdata
->nr_pe_cmp
;
322 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
324 static DEVICE_ATTR_RO(nr_pe_cmp
);
326 static ssize_t
nr_addr_cmp_show(struct device
*dev
,
327 struct device_attribute
*attr
,
331 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
333 val
= drvdata
->nr_addr_cmp
;
334 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
336 static DEVICE_ATTR_RO(nr_addr_cmp
);
338 static ssize_t
nr_cntr_show(struct device
*dev
,
339 struct device_attribute
*attr
,
343 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
345 val
= drvdata
->nr_cntr
;
346 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
348 static DEVICE_ATTR_RO(nr_cntr
);
350 static ssize_t
nr_ext_inp_show(struct device
*dev
,
351 struct device_attribute
*attr
,
355 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
357 val
= drvdata
->nr_ext_inp
;
358 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
360 static DEVICE_ATTR_RO(nr_ext_inp
);
362 static ssize_t
numcidc_show(struct device
*dev
,
363 struct device_attribute
*attr
,
367 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
369 val
= drvdata
->numcidc
;
370 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
372 static DEVICE_ATTR_RO(numcidc
);
374 static ssize_t
numvmidc_show(struct device
*dev
,
375 struct device_attribute
*attr
,
379 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
381 val
= drvdata
->numvmidc
;
382 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
384 static DEVICE_ATTR_RO(numvmidc
);
386 static ssize_t
nrseqstate_show(struct device
*dev
,
387 struct device_attribute
*attr
,
391 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
393 val
= drvdata
->nrseqstate
;
394 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
396 static DEVICE_ATTR_RO(nrseqstate
);
398 static ssize_t
nr_resource_show(struct device
*dev
,
399 struct device_attribute
*attr
,
403 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
405 val
= drvdata
->nr_resource
;
406 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
408 static DEVICE_ATTR_RO(nr_resource
);
410 static ssize_t
nr_ss_cmp_show(struct device
*dev
,
411 struct device_attribute
*attr
,
415 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
417 val
= drvdata
->nr_ss_cmp
;
418 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
420 static DEVICE_ATTR_RO(nr_ss_cmp
);
422 static ssize_t
reset_store(struct device
*dev
,
423 struct device_attribute
*attr
,
424 const char *buf
, size_t size
)
428 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
430 if (kstrtoul(buf
, 16, &val
))
433 spin_lock(&drvdata
->spinlock
);
437 /* Disable data tracing: do not trace load and store data transfers */
438 drvdata
->mode
&= ~(ETM_MODE_LOAD
| ETM_MODE_STORE
);
439 drvdata
->cfg
&= ~(BIT(1) | BIT(2));
441 /* Disable data value and data address tracing */
442 drvdata
->mode
&= ~(ETM_MODE_DATA_TRACE_ADDR
|
443 ETM_MODE_DATA_TRACE_VAL
);
444 drvdata
->cfg
&= ~(BIT(16) | BIT(17));
446 /* Disable all events tracing */
447 drvdata
->eventctrl0
= 0x0;
448 drvdata
->eventctrl1
= 0x0;
450 /* Disable timestamp event */
451 drvdata
->ts_ctrl
= 0x0;
453 /* Disable stalling */
454 drvdata
->stall_ctrl
= 0x0;
456 /* Reset trace synchronization period to 2^8 = 256 bytes*/
457 if (drvdata
->syncpr
== false)
458 drvdata
->syncfreq
= 0x8;
461 * Enable ViewInst to trace everything with start-stop logic in
462 * started state. ARM recommends start-stop logic is set before
465 drvdata
->vinst_ctrl
|= BIT(0);
466 if (drvdata
->nr_addr_cmp
== true) {
467 drvdata
->mode
|= ETM_MODE_VIEWINST_STARTSTOP
;
468 /* SSSTATUS, bit[9] */
469 drvdata
->vinst_ctrl
|= BIT(9);
472 /* No address range filtering for ViewInst */
473 drvdata
->viiectlr
= 0x0;
475 /* No start-stop filtering for ViewInst */
476 drvdata
->vissctlr
= 0x0;
478 /* Disable seq events */
479 for (i
= 0; i
< drvdata
->nrseqstate
-1; i
++)
480 drvdata
->seq_ctrl
[i
] = 0x0;
481 drvdata
->seq_rst
= 0x0;
482 drvdata
->seq_state
= 0x0;
484 /* Disable external input events */
485 drvdata
->ext_inp
= 0x0;
487 drvdata
->cntr_idx
= 0x0;
488 for (i
= 0; i
< drvdata
->nr_cntr
; i
++) {
489 drvdata
->cntrldvr
[i
] = 0x0;
490 drvdata
->cntr_ctrl
[i
] = 0x0;
491 drvdata
->cntr_val
[i
] = 0x0;
494 /* Resource selector pair 0 is always implemented and reserved */
495 drvdata
->res_idx
= 0x2;
496 for (i
= 2; i
< drvdata
->nr_resource
* 2; i
++)
497 drvdata
->res_ctrl
[i
] = 0x0;
499 for (i
= 0; i
< drvdata
->nr_ss_cmp
; i
++) {
500 drvdata
->ss_ctrl
[i
] = 0x0;
501 drvdata
->ss_pe_cmp
[i
] = 0x0;
504 drvdata
->addr_idx
= 0x0;
505 for (i
= 0; i
< drvdata
->nr_addr_cmp
* 2; i
++) {
506 drvdata
->addr_val
[i
] = 0x0;
507 drvdata
->addr_acc
[i
] = 0x0;
508 drvdata
->addr_type
[i
] = ETM_ADDR_TYPE_NONE
;
511 drvdata
->ctxid_idx
= 0x0;
512 for (i
= 0; i
< drvdata
->numcidc
; i
++) {
513 drvdata
->ctxid_pid
[i
] = 0x0;
514 drvdata
->ctxid_vpid
[i
] = 0x0;
517 drvdata
->ctxid_mask0
= 0x0;
518 drvdata
->ctxid_mask1
= 0x0;
520 drvdata
->vmid_idx
= 0x0;
521 for (i
= 0; i
< drvdata
->numvmidc
; i
++)
522 drvdata
->vmid_val
[i
] = 0x0;
523 drvdata
->vmid_mask0
= 0x0;
524 drvdata
->vmid_mask1
= 0x0;
526 drvdata
->trcid
= drvdata
->cpu
+ 1;
527 spin_unlock(&drvdata
->spinlock
);
530 static DEVICE_ATTR_WO(reset
);
532 static ssize_t
mode_show(struct device
*dev
,
533 struct device_attribute
*attr
,
537 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
540 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
543 static ssize_t
mode_store(struct device
*dev
,
544 struct device_attribute
*attr
,
545 const char *buf
, size_t size
)
547 unsigned long val
, mode
;
548 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
550 if (kstrtoul(buf
, 16, &val
))
553 spin_lock(&drvdata
->spinlock
);
554 drvdata
->mode
= val
& ETMv4_MODE_ALL
;
556 if (drvdata
->mode
& ETM_MODE_EXCLUDE
)
557 etm4_set_mode_exclude(drvdata
, true);
559 etm4_set_mode_exclude(drvdata
, false);
561 if (drvdata
->instrp0
== true) {
562 /* start by clearing instruction P0 field */
563 drvdata
->cfg
&= ~(BIT(1) | BIT(2));
564 if (drvdata
->mode
& ETM_MODE_LOAD
)
565 /* 0b01 Trace load instructions as P0 instructions */
566 drvdata
->cfg
|= BIT(1);
567 if (drvdata
->mode
& ETM_MODE_STORE
)
568 /* 0b10 Trace store instructions as P0 instructions */
569 drvdata
->cfg
|= BIT(2);
570 if (drvdata
->mode
& ETM_MODE_LOAD_STORE
)
572 * 0b11 Trace load and store instructions
575 drvdata
->cfg
|= BIT(1) | BIT(2);
578 /* bit[3], Branch broadcast mode */
579 if ((drvdata
->mode
& ETM_MODE_BB
) && (drvdata
->trcbb
== true))
580 drvdata
->cfg
|= BIT(3);
582 drvdata
->cfg
&= ~BIT(3);
584 /* bit[4], Cycle counting instruction trace bit */
585 if ((drvdata
->mode
& ETMv4_MODE_CYCACC
) &&
586 (drvdata
->trccci
== true))
587 drvdata
->cfg
|= BIT(4);
589 drvdata
->cfg
&= ~BIT(4);
591 /* bit[6], Context ID tracing bit */
592 if ((drvdata
->mode
& ETMv4_MODE_CTXID
) && (drvdata
->ctxid_size
))
593 drvdata
->cfg
|= BIT(6);
595 drvdata
->cfg
&= ~BIT(6);
597 if ((drvdata
->mode
& ETM_MODE_VMID
) && (drvdata
->vmid_size
))
598 drvdata
->cfg
|= BIT(7);
600 drvdata
->cfg
&= ~BIT(7);
602 /* bits[10:8], Conditional instruction tracing bit */
603 mode
= ETM_MODE_COND(drvdata
->mode
);
604 if (drvdata
->trccond
== true) {
605 drvdata
->cfg
&= ~(BIT(8) | BIT(9) | BIT(10));
606 drvdata
->cfg
|= mode
<< 8;
609 /* bit[11], Global timestamp tracing bit */
610 if ((drvdata
->mode
& ETMv4_MODE_TIMESTAMP
) && (drvdata
->ts_size
))
611 drvdata
->cfg
|= BIT(11);
613 drvdata
->cfg
&= ~BIT(11);
615 /* bit[12], Return stack enable bit */
616 if ((drvdata
->mode
& ETM_MODE_RETURNSTACK
) &&
617 (drvdata
->retstack
== true))
618 drvdata
->cfg
|= BIT(12);
620 drvdata
->cfg
&= ~BIT(12);
622 /* bits[14:13], Q element enable field */
623 mode
= ETM_MODE_QELEM(drvdata
->mode
);
624 /* start by clearing QE bits */
625 drvdata
->cfg
&= ~(BIT(13) | BIT(14));
626 /* if supported, Q elements with instruction counts are enabled */
627 if ((mode
& BIT(0)) && (drvdata
->q_support
& BIT(0)))
628 drvdata
->cfg
|= BIT(13);
630 * if supported, Q elements with and without instruction
633 if ((mode
& BIT(1)) && (drvdata
->q_support
& BIT(1)))
634 drvdata
->cfg
|= BIT(14);
636 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
637 if ((drvdata
->mode
& ETM_MODE_ATB_TRIGGER
) &&
638 (drvdata
->atbtrig
== true))
639 drvdata
->eventctrl1
|= BIT(11);
641 drvdata
->eventctrl1
&= ~BIT(11);
643 /* bit[12], Low-power state behavior override bit */
644 if ((drvdata
->mode
& ETM_MODE_LPOVERRIDE
) &&
645 (drvdata
->lpoverride
== true))
646 drvdata
->eventctrl1
|= BIT(12);
648 drvdata
->eventctrl1
&= ~BIT(12);
650 /* bit[8], Instruction stall bit */
651 if (drvdata
->mode
& ETM_MODE_ISTALL_EN
)
652 drvdata
->stall_ctrl
|= BIT(8);
654 drvdata
->stall_ctrl
&= ~BIT(8);
656 /* bit[10], Prioritize instruction trace bit */
657 if (drvdata
->mode
& ETM_MODE_INSTPRIO
)
658 drvdata
->stall_ctrl
|= BIT(10);
660 drvdata
->stall_ctrl
&= ~BIT(10);
662 /* bit[13], Trace overflow prevention bit */
663 if ((drvdata
->mode
& ETM_MODE_NOOVERFLOW
) &&
664 (drvdata
->nooverflow
== true))
665 drvdata
->stall_ctrl
|= BIT(13);
667 drvdata
->stall_ctrl
&= ~BIT(13);
669 /* bit[9] Start/stop logic control bit */
670 if (drvdata
->mode
& ETM_MODE_VIEWINST_STARTSTOP
)
671 drvdata
->vinst_ctrl
|= BIT(9);
673 drvdata
->vinst_ctrl
&= ~BIT(9);
675 /* bit[10], Whether a trace unit must trace a Reset exception */
676 if (drvdata
->mode
& ETM_MODE_TRACE_RESET
)
677 drvdata
->vinst_ctrl
|= BIT(10);
679 drvdata
->vinst_ctrl
&= ~BIT(10);
681 /* bit[11], Whether a trace unit must trace a system error exception */
682 if ((drvdata
->mode
& ETM_MODE_TRACE_ERR
) &&
683 (drvdata
->trc_error
== true))
684 drvdata
->vinst_ctrl
|= BIT(11);
686 drvdata
->vinst_ctrl
&= ~BIT(11);
688 spin_unlock(&drvdata
->spinlock
);
691 static DEVICE_ATTR_RW(mode
);
693 static ssize_t
pe_show(struct device
*dev
,
694 struct device_attribute
*attr
,
698 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
700 val
= drvdata
->pe_sel
;
701 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
704 static ssize_t
pe_store(struct device
*dev
,
705 struct device_attribute
*attr
,
706 const char *buf
, size_t size
)
709 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
711 if (kstrtoul(buf
, 16, &val
))
714 spin_lock(&drvdata
->spinlock
);
715 if (val
> drvdata
->nr_pe
) {
716 spin_unlock(&drvdata
->spinlock
);
720 drvdata
->pe_sel
= val
;
721 spin_unlock(&drvdata
->spinlock
);
724 static DEVICE_ATTR_RW(pe
);
726 static ssize_t
event_show(struct device
*dev
,
727 struct device_attribute
*attr
,
731 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
733 val
= drvdata
->eventctrl0
;
734 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
737 static ssize_t
event_store(struct device
*dev
,
738 struct device_attribute
*attr
,
739 const char *buf
, size_t size
)
742 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
744 if (kstrtoul(buf
, 16, &val
))
747 spin_lock(&drvdata
->spinlock
);
748 switch (drvdata
->nr_event
) {
750 /* EVENT0, bits[7:0] */
751 drvdata
->eventctrl0
= val
& 0xFF;
754 /* EVENT1, bits[15:8] */
755 drvdata
->eventctrl0
= val
& 0xFFFF;
758 /* EVENT2, bits[23:16] */
759 drvdata
->eventctrl0
= val
& 0xFFFFFF;
762 /* EVENT3, bits[31:24] */
763 drvdata
->eventctrl0
= val
;
768 spin_unlock(&drvdata
->spinlock
);
771 static DEVICE_ATTR_RW(event
);
773 static ssize_t
event_instren_show(struct device
*dev
,
774 struct device_attribute
*attr
,
778 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
780 val
= BMVAL(drvdata
->eventctrl1
, 0, 3);
781 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
784 static ssize_t
event_instren_store(struct device
*dev
,
785 struct device_attribute
*attr
,
786 const char *buf
, size_t size
)
789 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
791 if (kstrtoul(buf
, 16, &val
))
794 spin_lock(&drvdata
->spinlock
);
795 /* start by clearing all instruction event enable bits */
796 drvdata
->eventctrl1
&= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
797 switch (drvdata
->nr_event
) {
799 /* generate Event element for event 1 */
800 drvdata
->eventctrl1
|= val
& BIT(1);
803 /* generate Event element for event 1 and 2 */
804 drvdata
->eventctrl1
|= val
& (BIT(0) | BIT(1));
807 /* generate Event element for event 1, 2 and 3 */
808 drvdata
->eventctrl1
|= val
& (BIT(0) | BIT(1) | BIT(2));
811 /* generate Event element for all 4 events */
812 drvdata
->eventctrl1
|= val
& 0xF;
817 spin_unlock(&drvdata
->spinlock
);
820 static DEVICE_ATTR_RW(event_instren
);
822 static ssize_t
event_ts_show(struct device
*dev
,
823 struct device_attribute
*attr
,
827 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
829 val
= drvdata
->ts_ctrl
;
830 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
833 static ssize_t
event_ts_store(struct device
*dev
,
834 struct device_attribute
*attr
,
835 const char *buf
, size_t size
)
838 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
840 if (kstrtoul(buf
, 16, &val
))
842 if (!drvdata
->ts_size
)
845 drvdata
->ts_ctrl
= val
& ETMv4_EVENT_MASK
;
848 static DEVICE_ATTR_RW(event_ts
);
850 static ssize_t
syncfreq_show(struct device
*dev
,
851 struct device_attribute
*attr
,
855 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
857 val
= drvdata
->syncfreq
;
858 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
861 static ssize_t
syncfreq_store(struct device
*dev
,
862 struct device_attribute
*attr
,
863 const char *buf
, size_t size
)
866 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
868 if (kstrtoul(buf
, 16, &val
))
870 if (drvdata
->syncpr
== true)
873 drvdata
->syncfreq
= val
& ETMv4_SYNC_MASK
;
876 static DEVICE_ATTR_RW(syncfreq
);
878 static ssize_t
cyc_threshold_show(struct device
*dev
,
879 struct device_attribute
*attr
,
883 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
885 val
= drvdata
->ccctlr
;
886 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
889 static ssize_t
cyc_threshold_store(struct device
*dev
,
890 struct device_attribute
*attr
,
891 const char *buf
, size_t size
)
894 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
896 if (kstrtoul(buf
, 16, &val
))
898 if (val
< drvdata
->ccitmin
)
901 drvdata
->ccctlr
= val
& ETM_CYC_THRESHOLD_MASK
;
904 static DEVICE_ATTR_RW(cyc_threshold
);
906 static ssize_t
bb_ctrl_show(struct device
*dev
,
907 struct device_attribute
*attr
,
911 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
913 val
= drvdata
->bb_ctrl
;
914 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
917 static ssize_t
bb_ctrl_store(struct device
*dev
,
918 struct device_attribute
*attr
,
919 const char *buf
, size_t size
)
922 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
924 if (kstrtoul(buf
, 16, &val
))
926 if (drvdata
->trcbb
== false)
928 if (!drvdata
->nr_addr_cmp
)
931 * Bit[7:0] selects which address range comparator is used for
932 * branch broadcast control.
934 if (BMVAL(val
, 0, 7) > drvdata
->nr_addr_cmp
)
937 drvdata
->bb_ctrl
= val
;
940 static DEVICE_ATTR_RW(bb_ctrl
);
942 static ssize_t
event_vinst_show(struct device
*dev
,
943 struct device_attribute
*attr
,
947 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
949 val
= drvdata
->vinst_ctrl
& ETMv4_EVENT_MASK
;
950 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
953 static ssize_t
event_vinst_store(struct device
*dev
,
954 struct device_attribute
*attr
,
955 const char *buf
, size_t size
)
958 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
960 if (kstrtoul(buf
, 16, &val
))
963 spin_lock(&drvdata
->spinlock
);
964 val
&= ETMv4_EVENT_MASK
;
965 drvdata
->vinst_ctrl
&= ~ETMv4_EVENT_MASK
;
966 drvdata
->vinst_ctrl
|= val
;
967 spin_unlock(&drvdata
->spinlock
);
970 static DEVICE_ATTR_RW(event_vinst
);
972 static ssize_t
s_exlevel_vinst_show(struct device
*dev
,
973 struct device_attribute
*attr
,
977 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
979 val
= BMVAL(drvdata
->vinst_ctrl
, 16, 19);
980 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
983 static ssize_t
s_exlevel_vinst_store(struct device
*dev
,
984 struct device_attribute
*attr
,
985 const char *buf
, size_t size
)
988 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
990 if (kstrtoul(buf
, 16, &val
))
993 spin_lock(&drvdata
->spinlock
);
994 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
995 drvdata
->vinst_ctrl
&= ~(BIT(16) | BIT(17) | BIT(19));
996 /* enable instruction tracing for corresponding exception level */
997 val
&= drvdata
->s_ex_level
;
998 drvdata
->vinst_ctrl
|= (val
<< 16);
999 spin_unlock(&drvdata
->spinlock
);
1002 static DEVICE_ATTR_RW(s_exlevel_vinst
);
1004 static ssize_t
ns_exlevel_vinst_show(struct device
*dev
,
1005 struct device_attribute
*attr
,
1009 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1011 /* EXLEVEL_NS, bits[23:20] */
1012 val
= BMVAL(drvdata
->vinst_ctrl
, 20, 23);
1013 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1016 static ssize_t
ns_exlevel_vinst_store(struct device
*dev
,
1017 struct device_attribute
*attr
,
1018 const char *buf
, size_t size
)
1021 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1023 if (kstrtoul(buf
, 16, &val
))
1026 spin_lock(&drvdata
->spinlock
);
1027 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
1028 drvdata
->vinst_ctrl
&= ~(BIT(20) | BIT(21) | BIT(22));
1029 /* enable instruction tracing for corresponding exception level */
1030 val
&= drvdata
->ns_ex_level
;
1031 drvdata
->vinst_ctrl
|= (val
<< 20);
1032 spin_unlock(&drvdata
->spinlock
);
1035 static DEVICE_ATTR_RW(ns_exlevel_vinst
);
1037 static ssize_t
addr_idx_show(struct device
*dev
,
1038 struct device_attribute
*attr
,
1042 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1044 val
= drvdata
->addr_idx
;
1045 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1048 static ssize_t
addr_idx_store(struct device
*dev
,
1049 struct device_attribute
*attr
,
1050 const char *buf
, size_t size
)
1053 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1055 if (kstrtoul(buf
, 16, &val
))
1057 if (val
>= drvdata
->nr_addr_cmp
* 2)
1061 * Use spinlock to ensure index doesn't change while it gets
1062 * dereferenced multiple times within a spinlock block elsewhere.
1064 spin_lock(&drvdata
->spinlock
);
1065 drvdata
->addr_idx
= val
;
1066 spin_unlock(&drvdata
->spinlock
);
1069 static DEVICE_ATTR_RW(addr_idx
);
1071 static ssize_t
addr_instdatatype_show(struct device
*dev
,
1072 struct device_attribute
*attr
,
1077 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1079 spin_lock(&drvdata
->spinlock
);
1080 idx
= drvdata
->addr_idx
;
1081 val
= BMVAL(drvdata
->addr_acc
[idx
], 0, 1);
1082 len
= scnprintf(buf
, PAGE_SIZE
, "%s\n",
1083 val
== ETM_INSTR_ADDR
? "instr" :
1084 (val
== ETM_DATA_LOAD_ADDR
? "data_load" :
1085 (val
== ETM_DATA_STORE_ADDR
? "data_store" :
1086 "data_load_store")));
1087 spin_unlock(&drvdata
->spinlock
);
1091 static ssize_t
addr_instdatatype_store(struct device
*dev
,
1092 struct device_attribute
*attr
,
1093 const char *buf
, size_t size
)
1097 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1099 if (strlen(buf
) >= 20)
1101 if (sscanf(buf
, "%s", str
) != 1)
1104 spin_lock(&drvdata
->spinlock
);
1105 idx
= drvdata
->addr_idx
;
1106 if (!strcmp(str
, "instr"))
1107 /* TYPE, bits[1:0] */
1108 drvdata
->addr_acc
[idx
] &= ~(BIT(0) | BIT(1));
1110 spin_unlock(&drvdata
->spinlock
);
1113 static DEVICE_ATTR_RW(addr_instdatatype
);
1115 static ssize_t
addr_single_show(struct device
*dev
,
1116 struct device_attribute
*attr
,
1121 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1123 idx
= drvdata
->addr_idx
;
1124 spin_lock(&drvdata
->spinlock
);
1125 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1126 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_SINGLE
)) {
1127 spin_unlock(&drvdata
->spinlock
);
1130 val
= (unsigned long)drvdata
->addr_val
[idx
];
1131 spin_unlock(&drvdata
->spinlock
);
1132 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1135 static ssize_t
addr_single_store(struct device
*dev
,
1136 struct device_attribute
*attr
,
1137 const char *buf
, size_t size
)
1141 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1143 if (kstrtoul(buf
, 16, &val
))
1146 spin_lock(&drvdata
->spinlock
);
1147 idx
= drvdata
->addr_idx
;
1148 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1149 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_SINGLE
)) {
1150 spin_unlock(&drvdata
->spinlock
);
1154 drvdata
->addr_val
[idx
] = (u64
)val
;
1155 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_SINGLE
;
1156 spin_unlock(&drvdata
->spinlock
);
1159 static DEVICE_ATTR_RW(addr_single
);
1161 static ssize_t
addr_range_show(struct device
*dev
,
1162 struct device_attribute
*attr
,
1166 unsigned long val1
, val2
;
1167 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1169 spin_lock(&drvdata
->spinlock
);
1170 idx
= drvdata
->addr_idx
;
1172 spin_unlock(&drvdata
->spinlock
);
1175 if (!((drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
&&
1176 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_NONE
) ||
1177 (drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_RANGE
&&
1178 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_RANGE
))) {
1179 spin_unlock(&drvdata
->spinlock
);
1183 val1
= (unsigned long)drvdata
->addr_val
[idx
];
1184 val2
= (unsigned long)drvdata
->addr_val
[idx
+ 1];
1185 spin_unlock(&drvdata
->spinlock
);
1186 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
1189 static ssize_t
addr_range_store(struct device
*dev
,
1190 struct device_attribute
*attr
,
1191 const char *buf
, size_t size
)
1194 unsigned long val1
, val2
;
1195 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1197 if (sscanf(buf
, "%lx %lx", &val1
, &val2
) != 2)
1199 /* lower address comparator cannot have a higher address value */
1203 spin_lock(&drvdata
->spinlock
);
1204 idx
= drvdata
->addr_idx
;
1206 spin_unlock(&drvdata
->spinlock
);
1210 if (!((drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
&&
1211 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_NONE
) ||
1212 (drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_RANGE
&&
1213 drvdata
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_RANGE
))) {
1214 spin_unlock(&drvdata
->spinlock
);
1218 drvdata
->addr_val
[idx
] = (u64
)val1
;
1219 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_RANGE
;
1220 drvdata
->addr_val
[idx
+ 1] = (u64
)val2
;
1221 drvdata
->addr_type
[idx
+ 1] = ETM_ADDR_TYPE_RANGE
;
1223 * Program include or exclude control bits for vinst or vdata
1224 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1226 if (drvdata
->mode
& ETM_MODE_EXCLUDE
)
1227 etm4_set_mode_exclude(drvdata
, true);
1229 etm4_set_mode_exclude(drvdata
, false);
1231 spin_unlock(&drvdata
->spinlock
);
1234 static DEVICE_ATTR_RW(addr_range
);
1236 static ssize_t
addr_start_show(struct device
*dev
,
1237 struct device_attribute
*attr
,
1242 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1244 spin_lock(&drvdata
->spinlock
);
1245 idx
= drvdata
->addr_idx
;
1247 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1248 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_START
)) {
1249 spin_unlock(&drvdata
->spinlock
);
1253 val
= (unsigned long)drvdata
->addr_val
[idx
];
1254 spin_unlock(&drvdata
->spinlock
);
1255 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1258 static ssize_t
addr_start_store(struct device
*dev
,
1259 struct device_attribute
*attr
,
1260 const char *buf
, size_t size
)
1264 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1266 if (kstrtoul(buf
, 16, &val
))
1269 spin_lock(&drvdata
->spinlock
);
1270 idx
= drvdata
->addr_idx
;
1271 if (!drvdata
->nr_addr_cmp
) {
1272 spin_unlock(&drvdata
->spinlock
);
1275 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1276 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_START
)) {
1277 spin_unlock(&drvdata
->spinlock
);
1281 drvdata
->addr_val
[idx
] = (u64
)val
;
1282 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_START
;
1283 drvdata
->vissctlr
|= BIT(idx
);
1284 /* SSSTATUS, bit[9] - turn on start/stop logic */
1285 drvdata
->vinst_ctrl
|= BIT(9);
1286 spin_unlock(&drvdata
->spinlock
);
1289 static DEVICE_ATTR_RW(addr_start
);
1291 static ssize_t
addr_stop_show(struct device
*dev
,
1292 struct device_attribute
*attr
,
1297 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1299 spin_lock(&drvdata
->spinlock
);
1300 idx
= drvdata
->addr_idx
;
1302 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1303 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_STOP
)) {
1304 spin_unlock(&drvdata
->spinlock
);
1308 val
= (unsigned long)drvdata
->addr_val
[idx
];
1309 spin_unlock(&drvdata
->spinlock
);
1310 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1313 static ssize_t
addr_stop_store(struct device
*dev
,
1314 struct device_attribute
*attr
,
1315 const char *buf
, size_t size
)
1319 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1321 if (kstrtoul(buf
, 16, &val
))
1324 spin_lock(&drvdata
->spinlock
);
1325 idx
= drvdata
->addr_idx
;
1326 if (!drvdata
->nr_addr_cmp
) {
1327 spin_unlock(&drvdata
->spinlock
);
1330 if (!(drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1331 drvdata
->addr_type
[idx
] == ETM_ADDR_TYPE_STOP
)) {
1332 spin_unlock(&drvdata
->spinlock
);
1336 drvdata
->addr_val
[idx
] = (u64
)val
;
1337 drvdata
->addr_type
[idx
] = ETM_ADDR_TYPE_STOP
;
1338 drvdata
->vissctlr
|= BIT(idx
+ 16);
1339 /* SSSTATUS, bit[9] - turn on start/stop logic */
1340 drvdata
->vinst_ctrl
|= BIT(9);
1341 spin_unlock(&drvdata
->spinlock
);
1344 static DEVICE_ATTR_RW(addr_stop
);
1346 static ssize_t
addr_ctxtype_show(struct device
*dev
,
1347 struct device_attribute
*attr
,
1352 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1354 spin_lock(&drvdata
->spinlock
);
1355 idx
= drvdata
->addr_idx
;
1356 /* CONTEXTTYPE, bits[3:2] */
1357 val
= BMVAL(drvdata
->addr_acc
[idx
], 2, 3);
1358 len
= scnprintf(buf
, PAGE_SIZE
, "%s\n", val
== ETM_CTX_NONE
? "none" :
1359 (val
== ETM_CTX_CTXID
? "ctxid" :
1360 (val
== ETM_CTX_VMID
? "vmid" : "all")));
1361 spin_unlock(&drvdata
->spinlock
);
1365 static ssize_t
addr_ctxtype_store(struct device
*dev
,
1366 struct device_attribute
*attr
,
1367 const char *buf
, size_t size
)
1371 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1373 if (strlen(buf
) >= 10)
1375 if (sscanf(buf
, "%s", str
) != 1)
1378 spin_lock(&drvdata
->spinlock
);
1379 idx
= drvdata
->addr_idx
;
1380 if (!strcmp(str
, "none"))
1381 /* start by clearing context type bits */
1382 drvdata
->addr_acc
[idx
] &= ~(BIT(2) | BIT(3));
1383 else if (!strcmp(str
, "ctxid")) {
1384 /* 0b01 The trace unit performs a Context ID */
1385 if (drvdata
->numcidc
) {
1386 drvdata
->addr_acc
[idx
] |= BIT(2);
1387 drvdata
->addr_acc
[idx
] &= ~BIT(3);
1389 } else if (!strcmp(str
, "vmid")) {
1390 /* 0b10 The trace unit performs a VMID */
1391 if (drvdata
->numvmidc
) {
1392 drvdata
->addr_acc
[idx
] &= ~BIT(2);
1393 drvdata
->addr_acc
[idx
] |= BIT(3);
1395 } else if (!strcmp(str
, "all")) {
1397 * 0b11 The trace unit performs a Context ID
1398 * comparison and a VMID
1400 if (drvdata
->numcidc
)
1401 drvdata
->addr_acc
[idx
] |= BIT(2);
1402 if (drvdata
->numvmidc
)
1403 drvdata
->addr_acc
[idx
] |= BIT(3);
1405 spin_unlock(&drvdata
->spinlock
);
1408 static DEVICE_ATTR_RW(addr_ctxtype
);
1410 static ssize_t
addr_context_show(struct device
*dev
,
1411 struct device_attribute
*attr
,
1416 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1418 spin_lock(&drvdata
->spinlock
);
1419 idx
= drvdata
->addr_idx
;
1420 /* context ID comparator bits[6:4] */
1421 val
= BMVAL(drvdata
->addr_acc
[idx
], 4, 6);
1422 spin_unlock(&drvdata
->spinlock
);
1423 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1426 static ssize_t
addr_context_store(struct device
*dev
,
1427 struct device_attribute
*attr
,
1428 const char *buf
, size_t size
)
1432 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1434 if (kstrtoul(buf
, 16, &val
))
1436 if ((drvdata
->numcidc
<= 1) && (drvdata
->numvmidc
<= 1))
1438 if (val
>= (drvdata
->numcidc
>= drvdata
->numvmidc
?
1439 drvdata
->numcidc
: drvdata
->numvmidc
))
1442 spin_lock(&drvdata
->spinlock
);
1443 idx
= drvdata
->addr_idx
;
1444 /* clear context ID comparator bits[6:4] */
1445 drvdata
->addr_acc
[idx
] &= ~(BIT(4) | BIT(5) | BIT(6));
1446 drvdata
->addr_acc
[idx
] |= (val
<< 4);
1447 spin_unlock(&drvdata
->spinlock
);
1450 static DEVICE_ATTR_RW(addr_context
);
1452 static ssize_t
seq_idx_show(struct device
*dev
,
1453 struct device_attribute
*attr
,
1457 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1459 val
= drvdata
->seq_idx
;
1460 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1463 static ssize_t
seq_idx_store(struct device
*dev
,
1464 struct device_attribute
*attr
,
1465 const char *buf
, size_t size
)
1468 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1470 if (kstrtoul(buf
, 16, &val
))
1472 if (val
>= drvdata
->nrseqstate
- 1)
1476 * Use spinlock to ensure index doesn't change while it gets
1477 * dereferenced multiple times within a spinlock block elsewhere.
1479 spin_lock(&drvdata
->spinlock
);
1480 drvdata
->seq_idx
= val
;
1481 spin_unlock(&drvdata
->spinlock
);
1484 static DEVICE_ATTR_RW(seq_idx
);
1486 static ssize_t
seq_state_show(struct device
*dev
,
1487 struct device_attribute
*attr
,
1491 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1493 val
= drvdata
->seq_state
;
1494 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1497 static ssize_t
seq_state_store(struct device
*dev
,
1498 struct device_attribute
*attr
,
1499 const char *buf
, size_t size
)
1502 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1504 if (kstrtoul(buf
, 16, &val
))
1506 if (val
>= drvdata
->nrseqstate
)
1509 drvdata
->seq_state
= val
;
1512 static DEVICE_ATTR_RW(seq_state
);
1514 static ssize_t
seq_event_show(struct device
*dev
,
1515 struct device_attribute
*attr
,
1520 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1522 spin_lock(&drvdata
->spinlock
);
1523 idx
= drvdata
->seq_idx
;
1524 val
= drvdata
->seq_ctrl
[idx
];
1525 spin_unlock(&drvdata
->spinlock
);
1526 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1529 static ssize_t
seq_event_store(struct device
*dev
,
1530 struct device_attribute
*attr
,
1531 const char *buf
, size_t size
)
1535 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1537 if (kstrtoul(buf
, 16, &val
))
1540 spin_lock(&drvdata
->spinlock
);
1541 idx
= drvdata
->seq_idx
;
1542 /* RST, bits[7:0] */
1543 drvdata
->seq_ctrl
[idx
] = val
& 0xFF;
1544 spin_unlock(&drvdata
->spinlock
);
1547 static DEVICE_ATTR_RW(seq_event
);
1549 static ssize_t
seq_reset_event_show(struct device
*dev
,
1550 struct device_attribute
*attr
,
1554 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1556 val
= drvdata
->seq_rst
;
1557 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1560 static ssize_t
seq_reset_event_store(struct device
*dev
,
1561 struct device_attribute
*attr
,
1562 const char *buf
, size_t size
)
1565 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1567 if (kstrtoul(buf
, 16, &val
))
1569 if (!(drvdata
->nrseqstate
))
1572 drvdata
->seq_rst
= val
& ETMv4_EVENT_MASK
;
1575 static DEVICE_ATTR_RW(seq_reset_event
);
1577 static ssize_t
cntr_idx_show(struct device
*dev
,
1578 struct device_attribute
*attr
,
1582 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1584 val
= drvdata
->cntr_idx
;
1585 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1588 static ssize_t
cntr_idx_store(struct device
*dev
,
1589 struct device_attribute
*attr
,
1590 const char *buf
, size_t size
)
1593 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1595 if (kstrtoul(buf
, 16, &val
))
1597 if (val
>= drvdata
->nr_cntr
)
1601 * Use spinlock to ensure index doesn't change while it gets
1602 * dereferenced multiple times within a spinlock block elsewhere.
1604 spin_lock(&drvdata
->spinlock
);
1605 drvdata
->cntr_idx
= val
;
1606 spin_unlock(&drvdata
->spinlock
);
1609 static DEVICE_ATTR_RW(cntr_idx
);
1611 static ssize_t
cntrldvr_show(struct device
*dev
,
1612 struct device_attribute
*attr
,
1617 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1619 spin_lock(&drvdata
->spinlock
);
1620 idx
= drvdata
->cntr_idx
;
1621 val
= drvdata
->cntrldvr
[idx
];
1622 spin_unlock(&drvdata
->spinlock
);
1623 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1626 static ssize_t
cntrldvr_store(struct device
*dev
,
1627 struct device_attribute
*attr
,
1628 const char *buf
, size_t size
)
1632 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1634 if (kstrtoul(buf
, 16, &val
))
1636 if (val
> ETM_CNTR_MAX_VAL
)
1639 spin_lock(&drvdata
->spinlock
);
1640 idx
= drvdata
->cntr_idx
;
1641 drvdata
->cntrldvr
[idx
] = val
;
1642 spin_unlock(&drvdata
->spinlock
);
1645 static DEVICE_ATTR_RW(cntrldvr
);
1647 static ssize_t
cntr_val_show(struct device
*dev
,
1648 struct device_attribute
*attr
,
1653 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1655 spin_lock(&drvdata
->spinlock
);
1656 idx
= drvdata
->cntr_idx
;
1657 val
= drvdata
->cntr_val
[idx
];
1658 spin_unlock(&drvdata
->spinlock
);
1659 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1662 static ssize_t
cntr_val_store(struct device
*dev
,
1663 struct device_attribute
*attr
,
1664 const char *buf
, size_t size
)
1668 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1670 if (kstrtoul(buf
, 16, &val
))
1672 if (val
> ETM_CNTR_MAX_VAL
)
1675 spin_lock(&drvdata
->spinlock
);
1676 idx
= drvdata
->cntr_idx
;
1677 drvdata
->cntr_val
[idx
] = val
;
1678 spin_unlock(&drvdata
->spinlock
);
1681 static DEVICE_ATTR_RW(cntr_val
);
1683 static ssize_t
cntr_ctrl_show(struct device
*dev
,
1684 struct device_attribute
*attr
,
1689 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1691 spin_lock(&drvdata
->spinlock
);
1692 idx
= drvdata
->cntr_idx
;
1693 val
= drvdata
->cntr_ctrl
[idx
];
1694 spin_unlock(&drvdata
->spinlock
);
1695 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1698 static ssize_t
cntr_ctrl_store(struct device
*dev
,
1699 struct device_attribute
*attr
,
1700 const char *buf
, size_t size
)
1704 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1706 if (kstrtoul(buf
, 16, &val
))
1709 spin_lock(&drvdata
->spinlock
);
1710 idx
= drvdata
->cntr_idx
;
1711 drvdata
->cntr_ctrl
[idx
] = val
;
1712 spin_unlock(&drvdata
->spinlock
);
1715 static DEVICE_ATTR_RW(cntr_ctrl
);
1717 static ssize_t
res_idx_show(struct device
*dev
,
1718 struct device_attribute
*attr
,
1722 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1724 val
= drvdata
->res_idx
;
1725 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1728 static ssize_t
res_idx_store(struct device
*dev
,
1729 struct device_attribute
*attr
,
1730 const char *buf
, size_t size
)
1733 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1735 if (kstrtoul(buf
, 16, &val
))
1737 /* Resource selector pair 0 is always implemented and reserved */
1738 if (val
< 2 || val
>= drvdata
->nr_resource
* 2)
1742 * Use spinlock to ensure index doesn't change while it gets
1743 * dereferenced multiple times within a spinlock block elsewhere.
1745 spin_lock(&drvdata
->spinlock
);
1746 drvdata
->res_idx
= val
;
1747 spin_unlock(&drvdata
->spinlock
);
1750 static DEVICE_ATTR_RW(res_idx
);
1752 static ssize_t
res_ctrl_show(struct device
*dev
,
1753 struct device_attribute
*attr
,
1758 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1760 spin_lock(&drvdata
->spinlock
);
1761 idx
= drvdata
->res_idx
;
1762 val
= drvdata
->res_ctrl
[idx
];
1763 spin_unlock(&drvdata
->spinlock
);
1764 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1767 static ssize_t
res_ctrl_store(struct device
*dev
,
1768 struct device_attribute
*attr
,
1769 const char *buf
, size_t size
)
1773 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1775 if (kstrtoul(buf
, 16, &val
))
1778 spin_lock(&drvdata
->spinlock
);
1779 idx
= drvdata
->res_idx
;
1780 /* For odd idx pair inversal bit is RES0 */
1782 /* PAIRINV, bit[21] */
1784 drvdata
->res_ctrl
[idx
] = val
;
1785 spin_unlock(&drvdata
->spinlock
);
1788 static DEVICE_ATTR_RW(res_ctrl
);
1790 static ssize_t
ctxid_idx_show(struct device
*dev
,
1791 struct device_attribute
*attr
,
1795 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1797 val
= drvdata
->ctxid_idx
;
1798 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1801 static ssize_t
ctxid_idx_store(struct device
*dev
,
1802 struct device_attribute
*attr
,
1803 const char *buf
, size_t size
)
1806 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1808 if (kstrtoul(buf
, 16, &val
))
1810 if (val
>= drvdata
->numcidc
)
1814 * Use spinlock to ensure index doesn't change while it gets
1815 * dereferenced multiple times within a spinlock block elsewhere.
1817 spin_lock(&drvdata
->spinlock
);
1818 drvdata
->ctxid_idx
= val
;
1819 spin_unlock(&drvdata
->spinlock
);
1822 static DEVICE_ATTR_RW(ctxid_idx
);
1824 static ssize_t
ctxid_pid_show(struct device
*dev
,
1825 struct device_attribute
*attr
,
1830 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1832 spin_lock(&drvdata
->spinlock
);
1833 idx
= drvdata
->ctxid_idx
;
1834 val
= (unsigned long)drvdata
->ctxid_vpid
[idx
];
1835 spin_unlock(&drvdata
->spinlock
);
1836 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1839 static ssize_t
ctxid_pid_store(struct device
*dev
,
1840 struct device_attribute
*attr
,
1841 const char *buf
, size_t size
)
1844 unsigned long vpid
, pid
;
1845 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1848 * only implemented when ctxid tracing is enabled, i.e. at least one
1849 * ctxid comparator is implemented and ctxid is greater than 0 bits
1852 if (!drvdata
->ctxid_size
|| !drvdata
->numcidc
)
1854 if (kstrtoul(buf
, 16, &vpid
))
1857 pid
= coresight_vpid_to_pid(vpid
);
1859 spin_lock(&drvdata
->spinlock
);
1860 idx
= drvdata
->ctxid_idx
;
1861 drvdata
->ctxid_pid
[idx
] = (u64
)pid
;
1862 drvdata
->ctxid_vpid
[idx
] = (u64
)vpid
;
1863 spin_unlock(&drvdata
->spinlock
);
1866 static DEVICE_ATTR_RW(ctxid_pid
);
1868 static ssize_t
ctxid_masks_show(struct device
*dev
,
1869 struct device_attribute
*attr
,
1872 unsigned long val1
, val2
;
1873 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1875 spin_lock(&drvdata
->spinlock
);
1876 val1
= drvdata
->ctxid_mask0
;
1877 val2
= drvdata
->ctxid_mask1
;
1878 spin_unlock(&drvdata
->spinlock
);
1879 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
1882 static ssize_t
ctxid_masks_store(struct device
*dev
,
1883 struct device_attribute
*attr
,
1884 const char *buf
, size_t size
)
1887 unsigned long val1
, val2
, mask
;
1888 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1891 * only implemented when ctxid tracing is enabled, i.e. at least one
1892 * ctxid comparator is implemented and ctxid is greater than 0 bits
1895 if (!drvdata
->ctxid_size
|| !drvdata
->numcidc
)
1897 if (sscanf(buf
, "%lx %lx", &val1
, &val2
) != 2)
1900 spin_lock(&drvdata
->spinlock
);
1902 * each byte[0..3] controls mask value applied to ctxid
1905 switch (drvdata
->numcidc
) {
1907 /* COMP0, bits[7:0] */
1908 drvdata
->ctxid_mask0
= val1
& 0xFF;
1911 /* COMP1, bits[15:8] */
1912 drvdata
->ctxid_mask0
= val1
& 0xFFFF;
1915 /* COMP2, bits[23:16] */
1916 drvdata
->ctxid_mask0
= val1
& 0xFFFFFF;
1919 /* COMP3, bits[31:24] */
1920 drvdata
->ctxid_mask0
= val1
;
1923 /* COMP4, bits[7:0] */
1924 drvdata
->ctxid_mask0
= val1
;
1925 drvdata
->ctxid_mask1
= val2
& 0xFF;
1928 /* COMP5, bits[15:8] */
1929 drvdata
->ctxid_mask0
= val1
;
1930 drvdata
->ctxid_mask1
= val2
& 0xFFFF;
1933 /* COMP6, bits[23:16] */
1934 drvdata
->ctxid_mask0
= val1
;
1935 drvdata
->ctxid_mask1
= val2
& 0xFFFFFF;
1938 /* COMP7, bits[31:24] */
1939 drvdata
->ctxid_mask0
= val1
;
1940 drvdata
->ctxid_mask1
= val2
;
1946 * If software sets a mask bit to 1, it must program relevant byte
1947 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1948 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1949 * of ctxid comparator0 value (corresponding to byte 0) register.
1951 mask
= drvdata
->ctxid_mask0
;
1952 for (i
= 0; i
< drvdata
->numcidc
; i
++) {
1953 /* mask value of corresponding ctxid comparator */
1954 maskbyte
= mask
& ETMv4_EVENT_MASK
;
1956 * each bit corresponds to a byte of respective ctxid comparator
1959 for (j
= 0; j
< 8; j
++) {
1961 drvdata
->ctxid_pid
[i
] &= ~(0xFF << (j
* 8));
1964 /* Select the next ctxid comparator mask value */
1966 /* ctxid comparators[4-7] */
1967 mask
= drvdata
->ctxid_mask1
;
1972 spin_unlock(&drvdata
->spinlock
);
1975 static DEVICE_ATTR_RW(ctxid_masks
);
1977 static ssize_t
vmid_idx_show(struct device
*dev
,
1978 struct device_attribute
*attr
,
1982 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1984 val
= drvdata
->vmid_idx
;
1985 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1988 static ssize_t
vmid_idx_store(struct device
*dev
,
1989 struct device_attribute
*attr
,
1990 const char *buf
, size_t size
)
1993 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1995 if (kstrtoul(buf
, 16, &val
))
1997 if (val
>= drvdata
->numvmidc
)
2001 * Use spinlock to ensure index doesn't change while it gets
2002 * dereferenced multiple times within a spinlock block elsewhere.
2004 spin_lock(&drvdata
->spinlock
);
2005 drvdata
->vmid_idx
= val
;
2006 spin_unlock(&drvdata
->spinlock
);
2009 static DEVICE_ATTR_RW(vmid_idx
);
2011 static ssize_t
vmid_val_show(struct device
*dev
,
2012 struct device_attribute
*attr
,
2016 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2018 val
= (unsigned long)drvdata
->vmid_val
[drvdata
->vmid_idx
];
2019 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
2022 static ssize_t
vmid_val_store(struct device
*dev
,
2023 struct device_attribute
*attr
,
2024 const char *buf
, size_t size
)
2027 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2030 * only implemented when vmid tracing is enabled, i.e. at least one
2031 * vmid comparator is implemented and at least 8 bit vmid size
2033 if (!drvdata
->vmid_size
|| !drvdata
->numvmidc
)
2035 if (kstrtoul(buf
, 16, &val
))
2038 spin_lock(&drvdata
->spinlock
);
2039 drvdata
->vmid_val
[drvdata
->vmid_idx
] = (u64
)val
;
2040 spin_unlock(&drvdata
->spinlock
);
2043 static DEVICE_ATTR_RW(vmid_val
);
2045 static ssize_t
vmid_masks_show(struct device
*dev
,
2046 struct device_attribute
*attr
, char *buf
)
2048 unsigned long val1
, val2
;
2049 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2051 spin_lock(&drvdata
->spinlock
);
2052 val1
= drvdata
->vmid_mask0
;
2053 val2
= drvdata
->vmid_mask1
;
2054 spin_unlock(&drvdata
->spinlock
);
2055 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
2058 static ssize_t
vmid_masks_store(struct device
*dev
,
2059 struct device_attribute
*attr
,
2060 const char *buf
, size_t size
)
2063 unsigned long val1
, val2
, mask
;
2064 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2066 * only implemented when vmid tracing is enabled, i.e. at least one
2067 * vmid comparator is implemented and at least 8 bit vmid size
2069 if (!drvdata
->vmid_size
|| !drvdata
->numvmidc
)
2071 if (sscanf(buf
, "%lx %lx", &val1
, &val2
) != 2)
2074 spin_lock(&drvdata
->spinlock
);
2077 * each byte[0..3] controls mask value applied to vmid
2080 switch (drvdata
->numvmidc
) {
2082 /* COMP0, bits[7:0] */
2083 drvdata
->vmid_mask0
= val1
& 0xFF;
2086 /* COMP1, bits[15:8] */
2087 drvdata
->vmid_mask0
= val1
& 0xFFFF;
2090 /* COMP2, bits[23:16] */
2091 drvdata
->vmid_mask0
= val1
& 0xFFFFFF;
2094 /* COMP3, bits[31:24] */
2095 drvdata
->vmid_mask0
= val1
;
2098 /* COMP4, bits[7:0] */
2099 drvdata
->vmid_mask0
= val1
;
2100 drvdata
->vmid_mask1
= val2
& 0xFF;
2103 /* COMP5, bits[15:8] */
2104 drvdata
->vmid_mask0
= val1
;
2105 drvdata
->vmid_mask1
= val2
& 0xFFFF;
2108 /* COMP6, bits[23:16] */
2109 drvdata
->vmid_mask0
= val1
;
2110 drvdata
->vmid_mask1
= val2
& 0xFFFFFF;
2113 /* COMP7, bits[31:24] */
2114 drvdata
->vmid_mask0
= val1
;
2115 drvdata
->vmid_mask1
= val2
;
2122 * If software sets a mask bit to 1, it must program relevant byte
2123 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2124 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2125 * of vmid comparator0 value (corresponding to byte 0) register.
2127 mask
= drvdata
->vmid_mask0
;
2128 for (i
= 0; i
< drvdata
->numvmidc
; i
++) {
2129 /* mask value of corresponding vmid comparator */
2130 maskbyte
= mask
& ETMv4_EVENT_MASK
;
2132 * each bit corresponds to a byte of respective vmid comparator
2135 for (j
= 0; j
< 8; j
++) {
2137 drvdata
->vmid_val
[i
] &= ~(0xFF << (j
* 8));
2140 /* Select the next vmid comparator mask value */
2142 /* vmid comparators[4-7] */
2143 mask
= drvdata
->vmid_mask1
;
2147 spin_unlock(&drvdata
->spinlock
);
2150 static DEVICE_ATTR_RW(vmid_masks
);
2152 static ssize_t
cpu_show(struct device
*dev
,
2153 struct device_attribute
*attr
, char *buf
)
2156 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2159 return scnprintf(buf
, PAGE_SIZE
, "%d\n", val
);
2162 static DEVICE_ATTR_RO(cpu
);
2164 static struct attribute
*coresight_etmv4_attrs
[] = {
2165 &dev_attr_nr_pe_cmp
.attr
,
2166 &dev_attr_nr_addr_cmp
.attr
,
2167 &dev_attr_nr_cntr
.attr
,
2168 &dev_attr_nr_ext_inp
.attr
,
2169 &dev_attr_numcidc
.attr
,
2170 &dev_attr_numvmidc
.attr
,
2171 &dev_attr_nrseqstate
.attr
,
2172 &dev_attr_nr_resource
.attr
,
2173 &dev_attr_nr_ss_cmp
.attr
,
2174 &dev_attr_reset
.attr
,
2175 &dev_attr_mode
.attr
,
2177 &dev_attr_event
.attr
,
2178 &dev_attr_event_instren
.attr
,
2179 &dev_attr_event_ts
.attr
,
2180 &dev_attr_syncfreq
.attr
,
2181 &dev_attr_cyc_threshold
.attr
,
2182 &dev_attr_bb_ctrl
.attr
,
2183 &dev_attr_event_vinst
.attr
,
2184 &dev_attr_s_exlevel_vinst
.attr
,
2185 &dev_attr_ns_exlevel_vinst
.attr
,
2186 &dev_attr_addr_idx
.attr
,
2187 &dev_attr_addr_instdatatype
.attr
,
2188 &dev_attr_addr_single
.attr
,
2189 &dev_attr_addr_range
.attr
,
2190 &dev_attr_addr_start
.attr
,
2191 &dev_attr_addr_stop
.attr
,
2192 &dev_attr_addr_ctxtype
.attr
,
2193 &dev_attr_addr_context
.attr
,
2194 &dev_attr_seq_idx
.attr
,
2195 &dev_attr_seq_state
.attr
,
2196 &dev_attr_seq_event
.attr
,
2197 &dev_attr_seq_reset_event
.attr
,
2198 &dev_attr_cntr_idx
.attr
,
2199 &dev_attr_cntrldvr
.attr
,
2200 &dev_attr_cntr_val
.attr
,
2201 &dev_attr_cntr_ctrl
.attr
,
2202 &dev_attr_res_idx
.attr
,
2203 &dev_attr_res_ctrl
.attr
,
2204 &dev_attr_ctxid_idx
.attr
,
2205 &dev_attr_ctxid_pid
.attr
,
2206 &dev_attr_ctxid_masks
.attr
,
2207 &dev_attr_vmid_idx
.attr
,
2208 &dev_attr_vmid_val
.attr
,
2209 &dev_attr_vmid_masks
.attr
,
2214 #define coresight_simple_func(name, offset) \
2215 static ssize_t name##_show(struct device *_dev, \
2216 struct device_attribute *attr, char *buf) \
2218 struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
2219 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
2220 readl_relaxed(drvdata->base + offset)); \
2222 DEVICE_ATTR_RO(name)
2224 coresight_simple_func(trcoslsr
, TRCOSLSR
);
2225 coresight_simple_func(trcpdcr
, TRCPDCR
);
2226 coresight_simple_func(trcpdsr
, TRCPDSR
);
2227 coresight_simple_func(trclsr
, TRCLSR
);
2228 coresight_simple_func(trcauthstatus
, TRCAUTHSTATUS
);
2229 coresight_simple_func(trcdevid
, TRCDEVID
);
2230 coresight_simple_func(trcdevtype
, TRCDEVTYPE
);
2231 coresight_simple_func(trcpidr0
, TRCPIDR0
);
2232 coresight_simple_func(trcpidr1
, TRCPIDR1
);
2233 coresight_simple_func(trcpidr2
, TRCPIDR2
);
2234 coresight_simple_func(trcpidr3
, TRCPIDR3
);
2236 static struct attribute
*coresight_etmv4_mgmt_attrs
[] = {
2237 &dev_attr_trcoslsr
.attr
,
2238 &dev_attr_trcpdcr
.attr
,
2239 &dev_attr_trcpdsr
.attr
,
2240 &dev_attr_trclsr
.attr
,
2241 &dev_attr_trcauthstatus
.attr
,
2242 &dev_attr_trcdevid
.attr
,
2243 &dev_attr_trcdevtype
.attr
,
2244 &dev_attr_trcpidr0
.attr
,
2245 &dev_attr_trcpidr1
.attr
,
2246 &dev_attr_trcpidr2
.attr
,
2247 &dev_attr_trcpidr3
.attr
,
2251 coresight_simple_func(trcidr0
, TRCIDR0
);
2252 coresight_simple_func(trcidr1
, TRCIDR1
);
2253 coresight_simple_func(trcidr2
, TRCIDR2
);
2254 coresight_simple_func(trcidr3
, TRCIDR3
);
2255 coresight_simple_func(trcidr4
, TRCIDR4
);
2256 coresight_simple_func(trcidr5
, TRCIDR5
);
2257 /* trcidr[6,7] are reserved */
2258 coresight_simple_func(trcidr8
, TRCIDR8
);
2259 coresight_simple_func(trcidr9
, TRCIDR9
);
2260 coresight_simple_func(trcidr10
, TRCIDR10
);
2261 coresight_simple_func(trcidr11
, TRCIDR11
);
2262 coresight_simple_func(trcidr12
, TRCIDR12
);
2263 coresight_simple_func(trcidr13
, TRCIDR13
);
2265 static struct attribute
*coresight_etmv4_trcidr_attrs
[] = {
2266 &dev_attr_trcidr0
.attr
,
2267 &dev_attr_trcidr1
.attr
,
2268 &dev_attr_trcidr2
.attr
,
2269 &dev_attr_trcidr3
.attr
,
2270 &dev_attr_trcidr4
.attr
,
2271 &dev_attr_trcidr5
.attr
,
2272 /* trcidr[6,7] are reserved */
2273 &dev_attr_trcidr8
.attr
,
2274 &dev_attr_trcidr9
.attr
,
2275 &dev_attr_trcidr10
.attr
,
2276 &dev_attr_trcidr11
.attr
,
2277 &dev_attr_trcidr12
.attr
,
2278 &dev_attr_trcidr13
.attr
,
2282 static const struct attribute_group coresight_etmv4_group
= {
2283 .attrs
= coresight_etmv4_attrs
,
2286 static const struct attribute_group coresight_etmv4_mgmt_group
= {
2287 .attrs
= coresight_etmv4_mgmt_attrs
,
2291 static const struct attribute_group coresight_etmv4_trcidr_group
= {
2292 .attrs
= coresight_etmv4_trcidr_attrs
,
2296 static const struct attribute_group
*coresight_etmv4_groups
[] = {
2297 &coresight_etmv4_group
,
2298 &coresight_etmv4_mgmt_group
,
2299 &coresight_etmv4_trcidr_group
,
2303 static void etm4_init_arch_data(void *info
)
2311 struct etmv4_drvdata
*drvdata
= info
;
2313 CS_UNLOCK(drvdata
->base
);
2315 /* find all capabilities of the tracing unit */
2316 etmidr0
= readl_relaxed(drvdata
->base
+ TRCIDR0
);
2318 /* INSTP0, bits[2:1] P0 tracing support field */
2319 if (BMVAL(etmidr0
, 1, 1) && BMVAL(etmidr0
, 2, 2))
2320 drvdata
->instrp0
= true;
2322 drvdata
->instrp0
= false;
2324 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2325 if (BMVAL(etmidr0
, 5, 5))
2326 drvdata
->trcbb
= true;
2328 drvdata
->trcbb
= false;
2330 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2331 if (BMVAL(etmidr0
, 6, 6))
2332 drvdata
->trccond
= true;
2334 drvdata
->trccond
= false;
2336 /* TRCCCI, bit[7] Cycle counting instruction bit */
2337 if (BMVAL(etmidr0
, 7, 7))
2338 drvdata
->trccci
= true;
2340 drvdata
->trccci
= false;
2342 /* RETSTACK, bit[9] Return stack bit */
2343 if (BMVAL(etmidr0
, 9, 9))
2344 drvdata
->retstack
= true;
2346 drvdata
->retstack
= false;
2348 /* NUMEVENT, bits[11:10] Number of events field */
2349 drvdata
->nr_event
= BMVAL(etmidr0
, 10, 11);
2350 /* QSUPP, bits[16:15] Q element support field */
2351 drvdata
->q_support
= BMVAL(etmidr0
, 15, 16);
2352 /* TSSIZE, bits[28:24] Global timestamp size field */
2353 drvdata
->ts_size
= BMVAL(etmidr0
, 24, 28);
2355 /* base architecture of trace unit */
2356 etmidr1
= readl_relaxed(drvdata
->base
+ TRCIDR1
);
2358 * TRCARCHMIN, bits[7:4] architecture the minor version number
2359 * TRCARCHMAJ, bits[11:8] architecture major versin number
2361 drvdata
->arch
= BMVAL(etmidr1
, 4, 11);
2363 /* maximum size of resources */
2364 etmidr2
= readl_relaxed(drvdata
->base
+ TRCIDR2
);
2365 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2366 drvdata
->ctxid_size
= BMVAL(etmidr2
, 5, 9);
2367 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2368 drvdata
->vmid_size
= BMVAL(etmidr2
, 10, 14);
2369 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2370 drvdata
->ccsize
= BMVAL(etmidr2
, 25, 28);
2372 etmidr3
= readl_relaxed(drvdata
->base
+ TRCIDR3
);
2373 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2374 drvdata
->ccitmin
= BMVAL(etmidr3
, 0, 11);
2375 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2376 drvdata
->s_ex_level
= BMVAL(etmidr3
, 16, 19);
2377 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2378 drvdata
->ns_ex_level
= BMVAL(etmidr3
, 20, 23);
2381 * TRCERR, bit[24] whether a trace unit can trace a
2382 * system error exception.
2384 if (BMVAL(etmidr3
, 24, 24))
2385 drvdata
->trc_error
= true;
2387 drvdata
->trc_error
= false;
2389 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2390 if (BMVAL(etmidr3
, 25, 25))
2391 drvdata
->syncpr
= true;
2393 drvdata
->syncpr
= false;
2395 /* STALLCTL, bit[26] is stall control implemented? */
2396 if (BMVAL(etmidr3
, 26, 26))
2397 drvdata
->stallctl
= true;
2399 drvdata
->stallctl
= false;
2401 /* SYSSTALL, bit[27] implementation can support stall control? */
2402 if (BMVAL(etmidr3
, 27, 27))
2403 drvdata
->sysstall
= true;
2405 drvdata
->sysstall
= false;
2407 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2408 drvdata
->nr_pe
= BMVAL(etmidr3
, 28, 30);
2410 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2411 if (BMVAL(etmidr3
, 31, 31))
2412 drvdata
->nooverflow
= true;
2414 drvdata
->nooverflow
= false;
2416 /* number of resources trace unit supports */
2417 etmidr4
= readl_relaxed(drvdata
->base
+ TRCIDR4
);
2418 /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
2419 drvdata
->nr_addr_cmp
= BMVAL(etmidr4
, 0, 3);
2420 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2421 drvdata
->nr_pe_cmp
= BMVAL(etmidr4
, 12, 15);
2423 * NUMRSPAIR, bits[19:16]
2424 * The number of resource pairs conveyed by the HW starts at 0, i.e a
2425 * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
2426 * As such add 1 to the value of NUMRSPAIR for a better representation.
2428 drvdata
->nr_resource
= BMVAL(etmidr4
, 16, 19) + 1;
2430 * NUMSSCC, bits[23:20] the number of single-shot
2431 * comparator control for tracing
2433 drvdata
->nr_ss_cmp
= BMVAL(etmidr4
, 20, 23);
2434 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2435 drvdata
->numcidc
= BMVAL(etmidr4
, 24, 27);
2436 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2437 drvdata
->numvmidc
= BMVAL(etmidr4
, 28, 31);
2439 etmidr5
= readl_relaxed(drvdata
->base
+ TRCIDR5
);
2440 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2441 drvdata
->nr_ext_inp
= BMVAL(etmidr5
, 0, 8);
2442 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2443 drvdata
->trcid_size
= BMVAL(etmidr5
, 16, 21);
2444 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2445 if (BMVAL(etmidr5
, 22, 22))
2446 drvdata
->atbtrig
= true;
2448 drvdata
->atbtrig
= false;
2450 * LPOVERRIDE, bit[23] implementation supports
2451 * low-power state override
2453 if (BMVAL(etmidr5
, 23, 23))
2454 drvdata
->lpoverride
= true;
2456 drvdata
->lpoverride
= false;
2457 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2458 drvdata
->nrseqstate
= BMVAL(etmidr5
, 25, 27);
2459 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2460 drvdata
->nr_cntr
= BMVAL(etmidr5
, 28, 30);
2461 CS_LOCK(drvdata
->base
);
/*
 * etm4_init_default_data - program a sane power-on configuration into the
 * per-CPU driver state (not the hardware; registers are written when the
 * trace unit is enabled).
 *
 * Scope of every loop below is bounded by the capability counts discovered
 * from the TRCIDRx registers (nr_cntr, nr_resource, nr_ss_cmp, numcidc,
 * numvmidc, nrseqstate, nr_addr_cmp).
 */
static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
{
	int i;

	/* Trace the first PE; enable context ID, VMID, timestamp and
	 * return-stack tracing by default.
	 */
	drvdata->pe_sel = 0x0;
	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);

	/* disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/*
	 * Enable trace synchronization every 4096 bytes of trace.  Only
	 * programmable when SYNCPR says the sync period isn't fixed in HW.
	 */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0xC;

	/*
	 * enable viewInst to trace everything with start-stop logic in
	 * started state
	 */
	drvdata->vinst_ctrl |= BIT(0);
	/* set initial state of start-stop logic */
	if (drvdata->nr_addr_cmp)
		drvdata->vinst_ctrl |= BIT(9);

	/* no address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;
	/* no start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* disable external input events */
	drvdata->ext_inp = 0x0;

	/* clear every counter: reload value, control and current value */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	/* Resource selector pair 0 is always implemented and reserved */
	drvdata->res_idx = 0x2;
	/* nr_resource counts pairs, hence the * 2; start past reserved pair 0 */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		drvdata->res_ctrl[i] = 0x0;

	/* clear all single-shot comparator controls */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	/*
	 * Default the first address comparator pair to the kernel text
	 * section so a bare "enable" traces kernel code.
	 */
	if (drvdata->nr_addr_cmp >= 1) {
		drvdata->addr_val[0] = (unsigned long)_stext;
		drvdata->addr_val[1] = (unsigned long)_etext;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	/* clear context ID comparator values */
	for (i = 0; i < drvdata->numcidc; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	/* clear VMID comparator values and masks */
	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/*
	 * A trace ID value of 0 is invalid, so let's start at some
	 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
	 * start at 0x20 to keep the two generations' IDs disjoint.
	 */
	drvdata->trcid = 0x20 + drvdata->cpu;
}
/*
 * etm4_cpu_callback - CPU hotplug notifier.
 *
 * Keeps the per-CPU trace unit state coherent across hotplug transitions:
 * unlock/re-enable the ETM when its CPU comes up, disable it on the way
 * down.  The per-CPU spinlock serialises against sysfs enable/disable.
 *
 * NOTE(review): the case labels and tail of this function were lost in
 * extraction and are reconstructed as CPU_STARTING / CPU_ONLINE /
 * CPU_DYING per the upstream driver — confirm against the full file.
 */
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* nothing to do for CPUs whose ETM hasn't been probed */
	if (!etmdrvdata[cpu])
		goto out;

	/* mask out the _FROZEN variants so suspend/resume is handled too */
	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		/* runs on the incoming CPU: unlock the OS lock once ... */
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm4_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		/* ... and reprogram the HW if tracing was left enabled */
		if (etmdrvdata[cpu]->enable)
			etm4_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		/* honour boot_enable for CPUs that came up after probe */
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		/* quiesce the trace unit before the CPU goes away */
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm4_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}
/* Hotplug notifier, registered once when the first ETM is probed */
static struct notifier_block etm4_cpu_notifier = {
	.notifier_call = etm4_cpu_callback,
};
/*
 * etm4_probe - AMBA bus probe for one ETMv4 trace unit.
 *
 * Maps the device registers, discovers the implementation's capabilities
 * on the ETM's own CPU (the trace registers are only accessible from
 * there, hence the smp_call_function_single() calls), installs default
 * configuration and registers the device with the CoreSight core.
 *
 * Returns 0 on success or a negative errno.
 */
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* DT systems carry connection/CPU affinity in the device node */
	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = pdata ? pdata->cpu : 0;

	/* hold off hotplug so cpu presence can't change under us */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	/* unlock the OS lock on the ETM's CPU; only succeeds if it's online */
	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	/* read the TRCIDRx capability registers from the ETM's own CPU */
	if (smp_call_function_single(drvdata->cpu,
				     etm4_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* first probed ETM registers the shared hotplug notifier */
	if (!etm4_count++)
		register_hotcpu_notifier(&etm4_cpu_notifier);

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm4_init_default_data(drvdata);

	pm_runtime_put(&adev->dev);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_coresight_register;
	}

	dev_info(dev, "%s initialized\n", (char *)id->data);

	/* honour the boot_enable module parameter */
	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	/* success path already dropped the PM reference before registering */
	pm_runtime_put(&adev->dev);
err_coresight_register:
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);
	return ret;
}
/*
 * etm4_remove - tear down one ETMv4 device.
 *
 * Unregisters from the CoreSight core; the last ETM to go also removes
 * the shared hotplug notifier (mirrors the etm4_count++ in probe).
 */
static int etm4_remove(struct amba_device *adev)
{
	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);

	return 0;
}
/*
 * AMBA peripheral ID match table.
 *
 * NOTE(review): the .id/.mask/.data initialisers were lost in extraction;
 * the values below are reconstructed from the upstream driver — confirm
 * against the full file before relying on them.
 */
static struct amba_id etm4_ids[] = {
	{       /* ETM 4.0 - Qualcomm */
		.id	= 0x0003b95d,
		.mask	= 0x0003ffff,
		.data	= "ETM 4.0",
	},
	{       /* ETM 4.0 - Juno board */
		.id	= 0x000bb95e,
		.mask	= 0x000fffff,
		.data	= "ETM 4.0",
	},
	{ 0, 0},
};
/* AMBA driver glue: bind etm4_probe/etm4_remove to the matching IDs */
static struct amba_driver etm4x_driver = {
	.drv = {
		.name	= "coresight-etm4x",
	},
	.probe		= etm4_probe,
	.remove		= etm4_remove,
	.id_table	= etm4_ids,
};

module_amba_driver(etm4x_driver);