1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 #include "coresight-syscfg.h"
14 static int etm4_set_mode_exclude(struct etmv4_drvdata
*drvdata
, bool exclude
)
17 struct etmv4_config
*config
= &drvdata
->config
;
19 idx
= config
->addr_idx
;
22 * TRCACATRn.TYPE bit[1:0]: type of comparison
23 * the trace unit performs
25 if (FIELD_GET(TRCACATRn_TYPE_MASK
, config
->addr_acc
[idx
]) == TRCACATRn_TYPE_ADDR
) {
30 * We are performing instruction address comparison. Set the
31 * relevant bit of ViewInst Include/Exclude Control register
32 * for corresponding address comparator pair.
34 if (config
->addr_type
[idx
] != ETM_ADDR_TYPE_RANGE
||
35 config
->addr_type
[idx
+ 1] != ETM_ADDR_TYPE_RANGE
)
38 if (exclude
== true) {
40 * Set exclude bit and unset the include bit
41 * corresponding to comparator pair
43 config
->viiectlr
|= BIT(idx
/ 2 + 16);
44 config
->viiectlr
&= ~BIT(idx
/ 2);
47 * Set include bit and unset exclude bit
48 * corresponding to comparator pair
50 config
->viiectlr
|= BIT(idx
/ 2);
51 config
->viiectlr
&= ~BIT(idx
/ 2 + 16);
57 static ssize_t
nr_pe_cmp_show(struct device
*dev
,
58 struct device_attribute
*attr
,
62 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
64 val
= drvdata
->nr_pe_cmp
;
65 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
67 static DEVICE_ATTR_RO(nr_pe_cmp
);
69 static ssize_t
nr_addr_cmp_show(struct device
*dev
,
70 struct device_attribute
*attr
,
74 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
76 val
= drvdata
->nr_addr_cmp
;
77 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
79 static DEVICE_ATTR_RO(nr_addr_cmp
);
81 static ssize_t
nr_cntr_show(struct device
*dev
,
82 struct device_attribute
*attr
,
86 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
88 val
= drvdata
->nr_cntr
;
89 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
91 static DEVICE_ATTR_RO(nr_cntr
);
93 static ssize_t
nr_ext_inp_show(struct device
*dev
,
94 struct device_attribute
*attr
,
98 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
100 val
= drvdata
->nr_ext_inp
;
101 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
103 static DEVICE_ATTR_RO(nr_ext_inp
);
105 static ssize_t
numcidc_show(struct device
*dev
,
106 struct device_attribute
*attr
,
110 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
112 val
= drvdata
->numcidc
;
113 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
115 static DEVICE_ATTR_RO(numcidc
);
117 static ssize_t
numvmidc_show(struct device
*dev
,
118 struct device_attribute
*attr
,
122 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
124 val
= drvdata
->numvmidc
;
125 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
127 static DEVICE_ATTR_RO(numvmidc
);
129 static ssize_t
nrseqstate_show(struct device
*dev
,
130 struct device_attribute
*attr
,
134 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
136 val
= drvdata
->nrseqstate
;
137 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
139 static DEVICE_ATTR_RO(nrseqstate
);
141 static ssize_t
nr_resource_show(struct device
*dev
,
142 struct device_attribute
*attr
,
146 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
148 val
= drvdata
->nr_resource
;
149 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
151 static DEVICE_ATTR_RO(nr_resource
);
153 static ssize_t
nr_ss_cmp_show(struct device
*dev
,
154 struct device_attribute
*attr
,
158 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
160 val
= drvdata
->nr_ss_cmp
;
161 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
163 static DEVICE_ATTR_RO(nr_ss_cmp
);
165 static ssize_t
reset_store(struct device
*dev
,
166 struct device_attribute
*attr
,
167 const char *buf
, size_t size
)
171 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
172 struct etmv4_config
*config
= &drvdata
->config
;
174 if (kstrtoul(buf
, 16, &val
))
177 spin_lock(&drvdata
->spinlock
);
181 /* Disable data tracing: do not trace load and store data transfers */
182 config
->mode
&= ~(ETM_MODE_LOAD
| ETM_MODE_STORE
);
183 config
->cfg
&= ~(TRCCONFIGR_INSTP0_LOAD
| TRCCONFIGR_INSTP0_STORE
);
185 /* Disable data value and data address tracing */
186 config
->mode
&= ~(ETM_MODE_DATA_TRACE_ADDR
|
187 ETM_MODE_DATA_TRACE_VAL
);
188 config
->cfg
&= ~(TRCCONFIGR_DA
| TRCCONFIGR_DV
);
190 /* Disable all events tracing */
191 config
->eventctrl0
= 0x0;
192 config
->eventctrl1
= 0x0;
194 /* Disable timestamp event */
195 config
->ts_ctrl
= 0x0;
197 /* Disable stalling */
198 config
->stall_ctrl
= 0x0;
200 /* Reset trace synchronization period to 2^8 = 256 bytes*/
201 if (drvdata
->syncpr
== false)
202 config
->syncfreq
= 0x8;
205 * Enable ViewInst to trace everything with start-stop logic in
206 * started state. ARM recommends start-stop logic is set before
209 config
->vinst_ctrl
= FIELD_PREP(TRCVICTLR_EVENT_MASK
, 0x01);
210 if (drvdata
->nr_addr_cmp
> 0) {
211 config
->mode
|= ETM_MODE_VIEWINST_STARTSTOP
;
212 /* SSSTATUS, bit[9] */
213 config
->vinst_ctrl
|= TRCVICTLR_SSSTATUS
;
216 /* No address range filtering for ViewInst */
217 config
->viiectlr
= 0x0;
219 /* No start-stop filtering for ViewInst */
220 config
->vissctlr
= 0x0;
221 config
->vipcssctlr
= 0x0;
223 /* Disable seq events */
224 for (i
= 0; i
< drvdata
->nrseqstate
-1; i
++)
225 config
->seq_ctrl
[i
] = 0x0;
226 config
->seq_rst
= 0x0;
227 config
->seq_state
= 0x0;
229 /* Disable external input events */
230 config
->ext_inp
= 0x0;
232 config
->cntr_idx
= 0x0;
233 for (i
= 0; i
< drvdata
->nr_cntr
; i
++) {
234 config
->cntrldvr
[i
] = 0x0;
235 config
->cntr_ctrl
[i
] = 0x0;
236 config
->cntr_val
[i
] = 0x0;
239 config
->res_idx
= 0x0;
240 for (i
= 2; i
< 2 * drvdata
->nr_resource
; i
++)
241 config
->res_ctrl
[i
] = 0x0;
243 config
->ss_idx
= 0x0;
244 for (i
= 0; i
< drvdata
->nr_ss_cmp
; i
++) {
245 config
->ss_ctrl
[i
] = 0x0;
246 config
->ss_pe_cmp
[i
] = 0x0;
249 config
->addr_idx
= 0x0;
250 for (i
= 0; i
< drvdata
->nr_addr_cmp
* 2; i
++) {
251 config
->addr_val
[i
] = 0x0;
252 config
->addr_acc
[i
] = 0x0;
253 config
->addr_type
[i
] = ETM_ADDR_TYPE_NONE
;
256 config
->ctxid_idx
= 0x0;
257 for (i
= 0; i
< drvdata
->numcidc
; i
++)
258 config
->ctxid_pid
[i
] = 0x0;
260 config
->ctxid_mask0
= 0x0;
261 config
->ctxid_mask1
= 0x0;
263 config
->vmid_idx
= 0x0;
264 for (i
= 0; i
< drvdata
->numvmidc
; i
++)
265 config
->vmid_val
[i
] = 0x0;
266 config
->vmid_mask0
= 0x0;
267 config
->vmid_mask1
= 0x0;
269 spin_unlock(&drvdata
->spinlock
);
271 /* for sysfs - only release trace id when resetting */
272 etm4_release_trace_id(drvdata
);
274 cscfg_csdev_reset_feats(to_coresight_device(dev
));
278 static DEVICE_ATTR_WO(reset
);
280 static ssize_t
mode_show(struct device
*dev
,
281 struct device_attribute
*attr
,
285 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
286 struct etmv4_config
*config
= &drvdata
->config
;
289 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
292 static ssize_t
mode_store(struct device
*dev
,
293 struct device_attribute
*attr
,
294 const char *buf
, size_t size
)
296 unsigned long val
, mode
;
297 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
298 struct etmv4_config
*config
= &drvdata
->config
;
300 if (kstrtoul(buf
, 16, &val
))
303 spin_lock(&drvdata
->spinlock
);
304 config
->mode
= val
& ETMv4_MODE_ALL
;
306 if (drvdata
->instrp0
== true) {
307 /* start by clearing instruction P0 field */
308 config
->cfg
&= ~TRCCONFIGR_INSTP0_LOAD_STORE
;
309 if (config
->mode
& ETM_MODE_LOAD
)
310 /* 0b01 Trace load instructions as P0 instructions */
311 config
->cfg
|= TRCCONFIGR_INSTP0_LOAD
;
312 if (config
->mode
& ETM_MODE_STORE
)
313 /* 0b10 Trace store instructions as P0 instructions */
314 config
->cfg
|= TRCCONFIGR_INSTP0_STORE
;
315 if (config
->mode
& ETM_MODE_LOAD_STORE
)
317 * 0b11 Trace load and store instructions
320 config
->cfg
|= TRCCONFIGR_INSTP0_LOAD_STORE
;
323 /* bit[3], Branch broadcast mode */
324 if ((config
->mode
& ETM_MODE_BB
) && (drvdata
->trcbb
== true))
325 config
->cfg
|= TRCCONFIGR_BB
;
327 config
->cfg
&= ~TRCCONFIGR_BB
;
329 /* bit[4], Cycle counting instruction trace bit */
330 if ((config
->mode
& ETMv4_MODE_CYCACC
) &&
331 (drvdata
->trccci
== true))
332 config
->cfg
|= TRCCONFIGR_CCI
;
334 config
->cfg
&= ~TRCCONFIGR_CCI
;
336 /* bit[6], Context ID tracing bit */
337 if ((config
->mode
& ETMv4_MODE_CTXID
) && (drvdata
->ctxid_size
))
338 config
->cfg
|= TRCCONFIGR_CID
;
340 config
->cfg
&= ~TRCCONFIGR_CID
;
342 if ((config
->mode
& ETM_MODE_VMID
) && (drvdata
->vmid_size
))
343 config
->cfg
|= TRCCONFIGR_VMID
;
345 config
->cfg
&= ~TRCCONFIGR_VMID
;
347 /* bits[10:8], Conditional instruction tracing bit */
348 mode
= ETM_MODE_COND(config
->mode
);
349 if (drvdata
->trccond
== true) {
350 config
->cfg
&= ~TRCCONFIGR_COND_MASK
;
351 config
->cfg
|= mode
<< __bf_shf(TRCCONFIGR_COND_MASK
);
354 /* bit[11], Global timestamp tracing bit */
355 if ((config
->mode
& ETMv4_MODE_TIMESTAMP
) && (drvdata
->ts_size
))
356 config
->cfg
|= TRCCONFIGR_TS
;
358 config
->cfg
&= ~TRCCONFIGR_TS
;
360 /* bit[12], Return stack enable bit */
361 if ((config
->mode
& ETM_MODE_RETURNSTACK
) &&
362 (drvdata
->retstack
== true))
363 config
->cfg
|= TRCCONFIGR_RS
;
365 config
->cfg
&= ~TRCCONFIGR_RS
;
367 /* bits[14:13], Q element enable field */
368 mode
= ETM_MODE_QELEM(config
->mode
);
369 /* start by clearing QE bits */
370 config
->cfg
&= ~(TRCCONFIGR_QE_W_COUNTS
| TRCCONFIGR_QE_WO_COUNTS
);
372 * if supported, Q elements with instruction counts are enabled.
373 * Always set the low bit for any requested mode. Valid combos are
374 * 0b00, 0b01 and 0b11.
376 if (mode
&& drvdata
->q_support
)
377 config
->cfg
|= TRCCONFIGR_QE_W_COUNTS
;
379 * if supported, Q elements with and without instruction
382 if ((mode
& BIT(1)) && (drvdata
->q_support
& BIT(1)))
383 config
->cfg
|= TRCCONFIGR_QE_WO_COUNTS
;
385 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
386 if ((config
->mode
& ETM_MODE_ATB_TRIGGER
) &&
387 (drvdata
->atbtrig
== true))
388 config
->eventctrl1
|= TRCEVENTCTL1R_ATB
;
390 config
->eventctrl1
&= ~TRCEVENTCTL1R_ATB
;
392 /* bit[12], Low-power state behavior override bit */
393 if ((config
->mode
& ETM_MODE_LPOVERRIDE
) &&
394 (drvdata
->lpoverride
== true))
395 config
->eventctrl1
|= TRCEVENTCTL1R_LPOVERRIDE
;
397 config
->eventctrl1
&= ~TRCEVENTCTL1R_LPOVERRIDE
;
399 /* bit[8], Instruction stall bit */
400 if ((config
->mode
& ETM_MODE_ISTALL_EN
) && (drvdata
->stallctl
== true))
401 config
->stall_ctrl
|= TRCSTALLCTLR_ISTALL
;
403 config
->stall_ctrl
&= ~TRCSTALLCTLR_ISTALL
;
405 /* bit[10], Prioritize instruction trace bit */
406 if (config
->mode
& ETM_MODE_INSTPRIO
)
407 config
->stall_ctrl
|= TRCSTALLCTLR_INSTPRIORITY
;
409 config
->stall_ctrl
&= ~TRCSTALLCTLR_INSTPRIORITY
;
411 /* bit[13], Trace overflow prevention bit */
412 if ((config
->mode
& ETM_MODE_NOOVERFLOW
) &&
413 (drvdata
->nooverflow
== true))
414 config
->stall_ctrl
|= TRCSTALLCTLR_NOOVERFLOW
;
416 config
->stall_ctrl
&= ~TRCSTALLCTLR_NOOVERFLOW
;
418 /* bit[9] Start/stop logic control bit */
419 if (config
->mode
& ETM_MODE_VIEWINST_STARTSTOP
)
420 config
->vinst_ctrl
|= TRCVICTLR_SSSTATUS
;
422 config
->vinst_ctrl
&= ~TRCVICTLR_SSSTATUS
;
424 /* bit[10], Whether a trace unit must trace a Reset exception */
425 if (config
->mode
& ETM_MODE_TRACE_RESET
)
426 config
->vinst_ctrl
|= TRCVICTLR_TRCRESET
;
428 config
->vinst_ctrl
&= ~TRCVICTLR_TRCRESET
;
430 /* bit[11], Whether a trace unit must trace a system error exception */
431 if ((config
->mode
& ETM_MODE_TRACE_ERR
) &&
432 (drvdata
->trc_error
== true))
433 config
->vinst_ctrl
|= TRCVICTLR_TRCERR
;
435 config
->vinst_ctrl
&= ~TRCVICTLR_TRCERR
;
437 if (config
->mode
& (ETM_MODE_EXCL_KERN
| ETM_MODE_EXCL_USER
))
438 etm4_config_trace_mode(config
);
440 spin_unlock(&drvdata
->spinlock
);
444 static DEVICE_ATTR_RW(mode
);
446 static ssize_t
pe_show(struct device
*dev
,
447 struct device_attribute
*attr
,
451 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
452 struct etmv4_config
*config
= &drvdata
->config
;
454 val
= config
->pe_sel
;
455 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
458 static ssize_t
pe_store(struct device
*dev
,
459 struct device_attribute
*attr
,
460 const char *buf
, size_t size
)
463 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
464 struct etmv4_config
*config
= &drvdata
->config
;
466 if (kstrtoul(buf
, 16, &val
))
469 spin_lock(&drvdata
->spinlock
);
470 if (val
> drvdata
->nr_pe
) {
471 spin_unlock(&drvdata
->spinlock
);
475 config
->pe_sel
= val
;
476 spin_unlock(&drvdata
->spinlock
);
479 static DEVICE_ATTR_RW(pe
);
481 static ssize_t
event_show(struct device
*dev
,
482 struct device_attribute
*attr
,
486 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
487 struct etmv4_config
*config
= &drvdata
->config
;
489 val
= config
->eventctrl0
;
490 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
493 static ssize_t
event_store(struct device
*dev
,
494 struct device_attribute
*attr
,
495 const char *buf
, size_t size
)
498 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
499 struct etmv4_config
*config
= &drvdata
->config
;
501 if (kstrtoul(buf
, 16, &val
))
504 spin_lock(&drvdata
->spinlock
);
505 switch (drvdata
->nr_event
) {
507 /* EVENT0, bits[7:0] */
508 config
->eventctrl0
= val
& 0xFF;
511 /* EVENT1, bits[15:8] */
512 config
->eventctrl0
= val
& 0xFFFF;
515 /* EVENT2, bits[23:16] */
516 config
->eventctrl0
= val
& 0xFFFFFF;
519 /* EVENT3, bits[31:24] */
520 config
->eventctrl0
= val
;
525 spin_unlock(&drvdata
->spinlock
);
528 static DEVICE_ATTR_RW(event
);
530 static ssize_t
event_instren_show(struct device
*dev
,
531 struct device_attribute
*attr
,
535 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
536 struct etmv4_config
*config
= &drvdata
->config
;
538 val
= FIELD_GET(TRCEVENTCTL1R_INSTEN_MASK
, config
->eventctrl1
);
539 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
542 static ssize_t
event_instren_store(struct device
*dev
,
543 struct device_attribute
*attr
,
544 const char *buf
, size_t size
)
547 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
548 struct etmv4_config
*config
= &drvdata
->config
;
550 if (kstrtoul(buf
, 16, &val
))
553 spin_lock(&drvdata
->spinlock
);
554 /* start by clearing all instruction event enable bits */
555 config
->eventctrl1
&= ~TRCEVENTCTL1R_INSTEN_MASK
;
556 switch (drvdata
->nr_event
) {
558 /* generate Event element for event 1 */
559 config
->eventctrl1
|= val
& TRCEVENTCTL1R_INSTEN_1
;
562 /* generate Event element for event 1 and 2 */
563 config
->eventctrl1
|= val
& (TRCEVENTCTL1R_INSTEN_0
| TRCEVENTCTL1R_INSTEN_1
);
566 /* generate Event element for event 1, 2 and 3 */
567 config
->eventctrl1
|= val
& (TRCEVENTCTL1R_INSTEN_0
|
568 TRCEVENTCTL1R_INSTEN_1
|
569 TRCEVENTCTL1R_INSTEN_2
);
572 /* generate Event element for all 4 events */
573 config
->eventctrl1
|= val
& (TRCEVENTCTL1R_INSTEN_0
|
574 TRCEVENTCTL1R_INSTEN_1
|
575 TRCEVENTCTL1R_INSTEN_2
|
576 TRCEVENTCTL1R_INSTEN_3
);
581 spin_unlock(&drvdata
->spinlock
);
584 static DEVICE_ATTR_RW(event_instren
);
586 static ssize_t
event_ts_show(struct device
*dev
,
587 struct device_attribute
*attr
,
591 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
592 struct etmv4_config
*config
= &drvdata
->config
;
594 val
= config
->ts_ctrl
;
595 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
598 static ssize_t
event_ts_store(struct device
*dev
,
599 struct device_attribute
*attr
,
600 const char *buf
, size_t size
)
603 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
604 struct etmv4_config
*config
= &drvdata
->config
;
606 if (kstrtoul(buf
, 16, &val
))
608 if (!drvdata
->ts_size
)
611 config
->ts_ctrl
= val
& ETMv4_EVENT_MASK
;
614 static DEVICE_ATTR_RW(event_ts
);
616 static ssize_t
syncfreq_show(struct device
*dev
,
617 struct device_attribute
*attr
,
621 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
622 struct etmv4_config
*config
= &drvdata
->config
;
624 val
= config
->syncfreq
;
625 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
628 static ssize_t
syncfreq_store(struct device
*dev
,
629 struct device_attribute
*attr
,
630 const char *buf
, size_t size
)
633 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
634 struct etmv4_config
*config
= &drvdata
->config
;
636 if (kstrtoul(buf
, 16, &val
))
638 if (drvdata
->syncpr
== true)
641 config
->syncfreq
= val
& ETMv4_SYNC_MASK
;
644 static DEVICE_ATTR_RW(syncfreq
);
646 static ssize_t
cyc_threshold_show(struct device
*dev
,
647 struct device_attribute
*attr
,
651 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
652 struct etmv4_config
*config
= &drvdata
->config
;
654 val
= config
->ccctlr
;
655 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
658 static ssize_t
cyc_threshold_store(struct device
*dev
,
659 struct device_attribute
*attr
,
660 const char *buf
, size_t size
)
663 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
664 struct etmv4_config
*config
= &drvdata
->config
;
666 if (kstrtoul(buf
, 16, &val
))
669 /* mask off max threshold before checking min value */
670 val
&= ETM_CYC_THRESHOLD_MASK
;
671 if (val
< drvdata
->ccitmin
)
674 config
->ccctlr
= val
;
677 static DEVICE_ATTR_RW(cyc_threshold
);
679 static ssize_t
bb_ctrl_show(struct device
*dev
,
680 struct device_attribute
*attr
,
684 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
685 struct etmv4_config
*config
= &drvdata
->config
;
687 val
= config
->bb_ctrl
;
688 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
691 static ssize_t
bb_ctrl_store(struct device
*dev
,
692 struct device_attribute
*attr
,
693 const char *buf
, size_t size
)
696 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
697 struct etmv4_config
*config
= &drvdata
->config
;
699 if (kstrtoul(buf
, 16, &val
))
701 if (drvdata
->trcbb
== false)
703 if (!drvdata
->nr_addr_cmp
)
707 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
708 * individual range comparators. If include then at least 1
709 * range must be selected.
711 if ((val
& TRCBBCTLR_MODE
) && (FIELD_GET(TRCBBCTLR_RANGE_MASK
, val
) == 0))
714 config
->bb_ctrl
= val
& (TRCBBCTLR_MODE
| TRCBBCTLR_RANGE_MASK
);
717 static DEVICE_ATTR_RW(bb_ctrl
);
719 static ssize_t
event_vinst_show(struct device
*dev
,
720 struct device_attribute
*attr
,
724 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
725 struct etmv4_config
*config
= &drvdata
->config
;
727 val
= FIELD_GET(TRCVICTLR_EVENT_MASK
, config
->vinst_ctrl
);
728 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
731 static ssize_t
event_vinst_store(struct device
*dev
,
732 struct device_attribute
*attr
,
733 const char *buf
, size_t size
)
736 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
737 struct etmv4_config
*config
= &drvdata
->config
;
739 if (kstrtoul(buf
, 16, &val
))
742 spin_lock(&drvdata
->spinlock
);
743 val
&= TRCVICTLR_EVENT_MASK
>> __bf_shf(TRCVICTLR_EVENT_MASK
);
744 config
->vinst_ctrl
&= ~TRCVICTLR_EVENT_MASK
;
745 config
->vinst_ctrl
|= FIELD_PREP(TRCVICTLR_EVENT_MASK
, val
);
746 spin_unlock(&drvdata
->spinlock
);
749 static DEVICE_ATTR_RW(event_vinst
);
751 static ssize_t
s_exlevel_vinst_show(struct device
*dev
,
752 struct device_attribute
*attr
,
756 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
757 struct etmv4_config
*config
= &drvdata
->config
;
759 val
= FIELD_GET(TRCVICTLR_EXLEVEL_S_MASK
, config
->vinst_ctrl
);
760 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
763 static ssize_t
s_exlevel_vinst_store(struct device
*dev
,
764 struct device_attribute
*attr
,
765 const char *buf
, size_t size
)
768 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
769 struct etmv4_config
*config
= &drvdata
->config
;
771 if (kstrtoul(buf
, 16, &val
))
774 spin_lock(&drvdata
->spinlock
);
775 /* clear all EXLEVEL_S bits */
776 config
->vinst_ctrl
&= ~TRCVICTLR_EXLEVEL_S_MASK
;
777 /* enable instruction tracing for corresponding exception level */
778 val
&= drvdata
->s_ex_level
;
779 config
->vinst_ctrl
|= val
<< __bf_shf(TRCVICTLR_EXLEVEL_S_MASK
);
780 spin_unlock(&drvdata
->spinlock
);
783 static DEVICE_ATTR_RW(s_exlevel_vinst
);
785 static ssize_t
ns_exlevel_vinst_show(struct device
*dev
,
786 struct device_attribute
*attr
,
790 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
791 struct etmv4_config
*config
= &drvdata
->config
;
793 /* EXLEVEL_NS, bits[23:20] */
794 val
= FIELD_GET(TRCVICTLR_EXLEVEL_NS_MASK
, config
->vinst_ctrl
);
795 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
798 static ssize_t
ns_exlevel_vinst_store(struct device
*dev
,
799 struct device_attribute
*attr
,
800 const char *buf
, size_t size
)
803 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
804 struct etmv4_config
*config
= &drvdata
->config
;
806 if (kstrtoul(buf
, 16, &val
))
809 spin_lock(&drvdata
->spinlock
);
810 /* clear EXLEVEL_NS bits */
811 config
->vinst_ctrl
&= ~TRCVICTLR_EXLEVEL_NS_MASK
;
812 /* enable instruction tracing for corresponding exception level */
813 val
&= drvdata
->ns_ex_level
;
814 config
->vinst_ctrl
|= val
<< __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK
);
815 spin_unlock(&drvdata
->spinlock
);
818 static DEVICE_ATTR_RW(ns_exlevel_vinst
);
820 static ssize_t
addr_idx_show(struct device
*dev
,
821 struct device_attribute
*attr
,
825 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
826 struct etmv4_config
*config
= &drvdata
->config
;
828 val
= config
->addr_idx
;
829 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
832 static ssize_t
addr_idx_store(struct device
*dev
,
833 struct device_attribute
*attr
,
834 const char *buf
, size_t size
)
837 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
838 struct etmv4_config
*config
= &drvdata
->config
;
840 if (kstrtoul(buf
, 16, &val
))
842 if (val
>= drvdata
->nr_addr_cmp
* 2)
846 * Use spinlock to ensure index doesn't change while it gets
847 * dereferenced multiple times within a spinlock block elsewhere.
849 spin_lock(&drvdata
->spinlock
);
850 config
->addr_idx
= val
;
851 spin_unlock(&drvdata
->spinlock
);
854 static DEVICE_ATTR_RW(addr_idx
);
856 static ssize_t
addr_instdatatype_show(struct device
*dev
,
857 struct device_attribute
*attr
,
862 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
863 struct etmv4_config
*config
= &drvdata
->config
;
865 spin_lock(&drvdata
->spinlock
);
866 idx
= config
->addr_idx
;
867 val
= FIELD_GET(TRCACATRn_TYPE_MASK
, config
->addr_acc
[idx
]);
868 len
= scnprintf(buf
, PAGE_SIZE
, "%s\n",
869 val
== TRCACATRn_TYPE_ADDR
? "instr" :
870 (val
== TRCACATRn_TYPE_DATA_LOAD_ADDR
? "data_load" :
871 (val
== TRCACATRn_TYPE_DATA_STORE_ADDR
? "data_store" :
872 "data_load_store")));
873 spin_unlock(&drvdata
->spinlock
);
877 static ssize_t
addr_instdatatype_store(struct device
*dev
,
878 struct device_attribute
*attr
,
879 const char *buf
, size_t size
)
883 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
884 struct etmv4_config
*config
= &drvdata
->config
;
886 if (strlen(buf
) >= 20)
888 if (sscanf(buf
, "%s", str
) != 1)
891 spin_lock(&drvdata
->spinlock
);
892 idx
= config
->addr_idx
;
893 if (!strcmp(str
, "instr"))
894 /* TYPE, bits[1:0] */
895 config
->addr_acc
[idx
] &= ~TRCACATRn_TYPE_MASK
;
897 spin_unlock(&drvdata
->spinlock
);
900 static DEVICE_ATTR_RW(addr_instdatatype
);
902 static ssize_t
addr_single_show(struct device
*dev
,
903 struct device_attribute
*attr
,
908 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
909 struct etmv4_config
*config
= &drvdata
->config
;
911 idx
= config
->addr_idx
;
912 spin_lock(&drvdata
->spinlock
);
913 if (!(config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
914 config
->addr_type
[idx
] == ETM_ADDR_TYPE_SINGLE
)) {
915 spin_unlock(&drvdata
->spinlock
);
918 val
= (unsigned long)config
->addr_val
[idx
];
919 spin_unlock(&drvdata
->spinlock
);
920 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
923 static ssize_t
addr_single_store(struct device
*dev
,
924 struct device_attribute
*attr
,
925 const char *buf
, size_t size
)
929 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
930 struct etmv4_config
*config
= &drvdata
->config
;
932 if (kstrtoul(buf
, 16, &val
))
935 spin_lock(&drvdata
->spinlock
);
936 idx
= config
->addr_idx
;
937 if (!(config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
938 config
->addr_type
[idx
] == ETM_ADDR_TYPE_SINGLE
)) {
939 spin_unlock(&drvdata
->spinlock
);
943 config
->addr_val
[idx
] = (u64
)val
;
944 config
->addr_type
[idx
] = ETM_ADDR_TYPE_SINGLE
;
945 spin_unlock(&drvdata
->spinlock
);
948 static DEVICE_ATTR_RW(addr_single
);
950 static ssize_t
addr_range_show(struct device
*dev
,
951 struct device_attribute
*attr
,
955 unsigned long val1
, val2
;
956 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
957 struct etmv4_config
*config
= &drvdata
->config
;
959 spin_lock(&drvdata
->spinlock
);
960 idx
= config
->addr_idx
;
962 spin_unlock(&drvdata
->spinlock
);
965 if (!((config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
&&
966 config
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_NONE
) ||
967 (config
->addr_type
[idx
] == ETM_ADDR_TYPE_RANGE
&&
968 config
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_RANGE
))) {
969 spin_unlock(&drvdata
->spinlock
);
973 val1
= (unsigned long)config
->addr_val
[idx
];
974 val2
= (unsigned long)config
->addr_val
[idx
+ 1];
975 spin_unlock(&drvdata
->spinlock
);
976 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
979 static ssize_t
addr_range_store(struct device
*dev
,
980 struct device_attribute
*attr
,
981 const char *buf
, size_t size
)
984 unsigned long val1
, val2
;
985 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
986 struct etmv4_config
*config
= &drvdata
->config
;
987 int elements
, exclude
;
989 elements
= sscanf(buf
, "%lx %lx %x", &val1
, &val2
, &exclude
);
991 /* exclude is optional, but need at least two parameter */
994 /* lower address comparator cannot have a higher address value */
998 spin_lock(&drvdata
->spinlock
);
999 idx
= config
->addr_idx
;
1001 spin_unlock(&drvdata
->spinlock
);
1005 if (!((config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
&&
1006 config
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_NONE
) ||
1007 (config
->addr_type
[idx
] == ETM_ADDR_TYPE_RANGE
&&
1008 config
->addr_type
[idx
+ 1] == ETM_ADDR_TYPE_RANGE
))) {
1009 spin_unlock(&drvdata
->spinlock
);
1013 config
->addr_val
[idx
] = (u64
)val1
;
1014 config
->addr_type
[idx
] = ETM_ADDR_TYPE_RANGE
;
1015 config
->addr_val
[idx
+ 1] = (u64
)val2
;
1016 config
->addr_type
[idx
+ 1] = ETM_ADDR_TYPE_RANGE
;
1018 * Program include or exclude control bits for vinst or vdata
1019 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1020 * use supplied value, or default to bit set in 'mode'
1023 exclude
= config
->mode
& ETM_MODE_EXCLUDE
;
1024 etm4_set_mode_exclude(drvdata
, exclude
? true : false);
1026 spin_unlock(&drvdata
->spinlock
);
1029 static DEVICE_ATTR_RW(addr_range
);
1031 static ssize_t
addr_start_show(struct device
*dev
,
1032 struct device_attribute
*attr
,
1037 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1038 struct etmv4_config
*config
= &drvdata
->config
;
1040 spin_lock(&drvdata
->spinlock
);
1041 idx
= config
->addr_idx
;
1043 if (!(config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1044 config
->addr_type
[idx
] == ETM_ADDR_TYPE_START
)) {
1045 spin_unlock(&drvdata
->spinlock
);
1049 val
= (unsigned long)config
->addr_val
[idx
];
1050 spin_unlock(&drvdata
->spinlock
);
1051 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1054 static ssize_t
addr_start_store(struct device
*dev
,
1055 struct device_attribute
*attr
,
1056 const char *buf
, size_t size
)
1060 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1061 struct etmv4_config
*config
= &drvdata
->config
;
1063 if (kstrtoul(buf
, 16, &val
))
1066 spin_lock(&drvdata
->spinlock
);
1067 idx
= config
->addr_idx
;
1068 if (!drvdata
->nr_addr_cmp
) {
1069 spin_unlock(&drvdata
->spinlock
);
1072 if (!(config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1073 config
->addr_type
[idx
] == ETM_ADDR_TYPE_START
)) {
1074 spin_unlock(&drvdata
->spinlock
);
1078 config
->addr_val
[idx
] = (u64
)val
;
1079 config
->addr_type
[idx
] = ETM_ADDR_TYPE_START
;
1080 config
->vissctlr
|= BIT(idx
);
1081 spin_unlock(&drvdata
->spinlock
);
1084 static DEVICE_ATTR_RW(addr_start
);
1086 static ssize_t
addr_stop_show(struct device
*dev
,
1087 struct device_attribute
*attr
,
1092 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1093 struct etmv4_config
*config
= &drvdata
->config
;
1095 spin_lock(&drvdata
->spinlock
);
1096 idx
= config
->addr_idx
;
1098 if (!(config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1099 config
->addr_type
[idx
] == ETM_ADDR_TYPE_STOP
)) {
1100 spin_unlock(&drvdata
->spinlock
);
1104 val
= (unsigned long)config
->addr_val
[idx
];
1105 spin_unlock(&drvdata
->spinlock
);
1106 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1109 static ssize_t
addr_stop_store(struct device
*dev
,
1110 struct device_attribute
*attr
,
1111 const char *buf
, size_t size
)
1115 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1116 struct etmv4_config
*config
= &drvdata
->config
;
1118 if (kstrtoul(buf
, 16, &val
))
1121 spin_lock(&drvdata
->spinlock
);
1122 idx
= config
->addr_idx
;
1123 if (!drvdata
->nr_addr_cmp
) {
1124 spin_unlock(&drvdata
->spinlock
);
1127 if (!(config
->addr_type
[idx
] == ETM_ADDR_TYPE_NONE
||
1128 config
->addr_type
[idx
] == ETM_ADDR_TYPE_STOP
)) {
1129 spin_unlock(&drvdata
->spinlock
);
1133 config
->addr_val
[idx
] = (u64
)val
;
1134 config
->addr_type
[idx
] = ETM_ADDR_TYPE_STOP
;
1135 config
->vissctlr
|= BIT(idx
+ 16);
1136 spin_unlock(&drvdata
->spinlock
);
1139 static DEVICE_ATTR_RW(addr_stop
);
1141 static ssize_t
addr_ctxtype_show(struct device
*dev
,
1142 struct device_attribute
*attr
,
1147 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1148 struct etmv4_config
*config
= &drvdata
->config
;
1150 spin_lock(&drvdata
->spinlock
);
1151 idx
= config
->addr_idx
;
1152 /* CONTEXTTYPE, bits[3:2] */
1153 val
= FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK
, config
->addr_acc
[idx
]);
1154 len
= scnprintf(buf
, PAGE_SIZE
, "%s\n", val
== ETM_CTX_NONE
? "none" :
1155 (val
== ETM_CTX_CTXID
? "ctxid" :
1156 (val
== ETM_CTX_VMID
? "vmid" : "all")));
1157 spin_unlock(&drvdata
->spinlock
);
1161 static ssize_t
addr_ctxtype_store(struct device
*dev
,
1162 struct device_attribute
*attr
,
1163 const char *buf
, size_t size
)
1167 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1168 struct etmv4_config
*config
= &drvdata
->config
;
1170 if (strlen(buf
) >= 10)
1172 if (sscanf(buf
, "%s", str
) != 1)
1175 spin_lock(&drvdata
->spinlock
);
1176 idx
= config
->addr_idx
;
1177 if (!strcmp(str
, "none"))
1178 /* start by clearing context type bits */
1179 config
->addr_acc
[idx
] &= ~TRCACATRn_CONTEXTTYPE_MASK
;
1180 else if (!strcmp(str
, "ctxid")) {
1181 /* 0b01 The trace unit performs a Context ID */
1182 if (drvdata
->numcidc
) {
1183 config
->addr_acc
[idx
] |= TRCACATRn_CONTEXTTYPE_CTXID
;
1184 config
->addr_acc
[idx
] &= ~TRCACATRn_CONTEXTTYPE_VMID
;
1186 } else if (!strcmp(str
, "vmid")) {
1187 /* 0b10 The trace unit performs a VMID */
1188 if (drvdata
->numvmidc
) {
1189 config
->addr_acc
[idx
] &= ~TRCACATRn_CONTEXTTYPE_CTXID
;
1190 config
->addr_acc
[idx
] |= TRCACATRn_CONTEXTTYPE_VMID
;
1192 } else if (!strcmp(str
, "all")) {
1194 * 0b11 The trace unit performs a Context ID
1195 * comparison and a VMID
1197 if (drvdata
->numcidc
)
1198 config
->addr_acc
[idx
] |= TRCACATRn_CONTEXTTYPE_CTXID
;
1199 if (drvdata
->numvmidc
)
1200 config
->addr_acc
[idx
] |= TRCACATRn_CONTEXTTYPE_VMID
;
1202 spin_unlock(&drvdata
->spinlock
);
1205 static DEVICE_ATTR_RW(addr_ctxtype
);
1207 static ssize_t
addr_context_show(struct device
*dev
,
1208 struct device_attribute
*attr
,
1213 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1214 struct etmv4_config
*config
= &drvdata
->config
;
1216 spin_lock(&drvdata
->spinlock
);
1217 idx
= config
->addr_idx
;
1218 /* context ID comparator bits[6:4] */
1219 val
= FIELD_GET(TRCACATRn_CONTEXT_MASK
, config
->addr_acc
[idx
]);
1220 spin_unlock(&drvdata
->spinlock
);
1221 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1224 static ssize_t
addr_context_store(struct device
*dev
,
1225 struct device_attribute
*attr
,
1226 const char *buf
, size_t size
)
1230 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1231 struct etmv4_config
*config
= &drvdata
->config
;
1233 if (kstrtoul(buf
, 16, &val
))
1235 if ((drvdata
->numcidc
<= 1) && (drvdata
->numvmidc
<= 1))
1237 if (val
>= (drvdata
->numcidc
>= drvdata
->numvmidc
?
1238 drvdata
->numcidc
: drvdata
->numvmidc
))
1241 spin_lock(&drvdata
->spinlock
);
1242 idx
= config
->addr_idx
;
1243 /* clear context ID comparator bits[6:4] */
1244 config
->addr_acc
[idx
] &= ~TRCACATRn_CONTEXT_MASK
;
1245 config
->addr_acc
[idx
] |= val
<< __bf_shf(TRCACATRn_CONTEXT_MASK
);
1246 spin_unlock(&drvdata
->spinlock
);
1249 static DEVICE_ATTR_RW(addr_context
);
1251 static ssize_t
addr_exlevel_s_ns_show(struct device
*dev
,
1252 struct device_attribute
*attr
,
1257 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1258 struct etmv4_config
*config
= &drvdata
->config
;
1260 spin_lock(&drvdata
->spinlock
);
1261 idx
= config
->addr_idx
;
1262 val
= FIELD_GET(TRCACATRn_EXLEVEL_MASK
, config
->addr_acc
[idx
]);
1263 spin_unlock(&drvdata
->spinlock
);
1264 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1267 static ssize_t
addr_exlevel_s_ns_store(struct device
*dev
,
1268 struct device_attribute
*attr
,
1269 const char *buf
, size_t size
)
1273 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1274 struct etmv4_config
*config
= &drvdata
->config
;
1276 if (kstrtoul(buf
, 0, &val
))
1279 if (val
& ~(TRCACATRn_EXLEVEL_MASK
>> __bf_shf(TRCACATRn_EXLEVEL_MASK
)))
1282 spin_lock(&drvdata
->spinlock
);
1283 idx
= config
->addr_idx
;
1284 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1285 config
->addr_acc
[idx
] &= ~TRCACATRn_EXLEVEL_MASK
;
1286 config
->addr_acc
[idx
] |= val
<< __bf_shf(TRCACATRn_EXLEVEL_MASK
);
1287 spin_unlock(&drvdata
->spinlock
);
1290 static DEVICE_ATTR_RW(addr_exlevel_s_ns
);
1292 static const char * const addr_type_names
[] = {
1300 static ssize_t
addr_cmp_view_show(struct device
*dev
,
1301 struct device_attribute
*attr
, char *buf
)
1304 unsigned long addr_v
, addr_v2
, addr_ctrl
;
1305 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1306 struct etmv4_config
*config
= &drvdata
->config
;
1308 bool exclude
= false;
1310 spin_lock(&drvdata
->spinlock
);
1311 idx
= config
->addr_idx
;
1312 addr_v
= config
->addr_val
[idx
];
1313 addr_ctrl
= config
->addr_acc
[idx
];
1314 addr_type
= config
->addr_type
[idx
];
1315 if (addr_type
== ETM_ADDR_TYPE_RANGE
) {
1319 addr_v
= config
->addr_val
[idx
];
1321 addr_v2
= config
->addr_val
[idx
+ 1];
1323 exclude
= config
->viiectlr
& BIT(idx
/ 2 + 16);
1325 spin_unlock(&drvdata
->spinlock
);
1327 size
= scnprintf(buf
, PAGE_SIZE
, "addr_cmp[%i] %s %#lx", idx
,
1328 addr_type_names
[addr_type
], addr_v
);
1329 if (addr_type
== ETM_ADDR_TYPE_RANGE
) {
1330 size
+= scnprintf(buf
+ size
, PAGE_SIZE
- size
,
1331 " %#lx %s", addr_v2
,
1332 exclude
? "exclude" : "include");
1334 size
+= scnprintf(buf
+ size
, PAGE_SIZE
- size
,
1335 " ctrl(%#lx)\n", addr_ctrl
);
1337 size
= scnprintf(buf
, PAGE_SIZE
, "addr_cmp[%i] unused\n", idx
);
1341 static DEVICE_ATTR_RO(addr_cmp_view
);
1343 static ssize_t
vinst_pe_cmp_start_stop_show(struct device
*dev
,
1344 struct device_attribute
*attr
,
1348 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1349 struct etmv4_config
*config
= &drvdata
->config
;
1351 if (!drvdata
->nr_pe_cmp
)
1353 val
= config
->vipcssctlr
;
1354 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1356 static ssize_t
vinst_pe_cmp_start_stop_store(struct device
*dev
,
1357 struct device_attribute
*attr
,
1358 const char *buf
, size_t size
)
1361 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1362 struct etmv4_config
*config
= &drvdata
->config
;
1364 if (kstrtoul(buf
, 16, &val
))
1366 if (!drvdata
->nr_pe_cmp
)
1369 spin_lock(&drvdata
->spinlock
);
1370 config
->vipcssctlr
= val
;
1371 spin_unlock(&drvdata
->spinlock
);
1374 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop
);
1376 static ssize_t
seq_idx_show(struct device
*dev
,
1377 struct device_attribute
*attr
,
1381 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1382 struct etmv4_config
*config
= &drvdata
->config
;
1384 val
= config
->seq_idx
;
1385 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1388 static ssize_t
seq_idx_store(struct device
*dev
,
1389 struct device_attribute
*attr
,
1390 const char *buf
, size_t size
)
1393 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1394 struct etmv4_config
*config
= &drvdata
->config
;
1396 if (kstrtoul(buf
, 16, &val
))
1398 if (val
>= drvdata
->nrseqstate
- 1)
1402 * Use spinlock to ensure index doesn't change while it gets
1403 * dereferenced multiple times within a spinlock block elsewhere.
1405 spin_lock(&drvdata
->spinlock
);
1406 config
->seq_idx
= val
;
1407 spin_unlock(&drvdata
->spinlock
);
1410 static DEVICE_ATTR_RW(seq_idx
);
1412 static ssize_t
seq_state_show(struct device
*dev
,
1413 struct device_attribute
*attr
,
1417 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1418 struct etmv4_config
*config
= &drvdata
->config
;
1420 val
= config
->seq_state
;
1421 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1424 static ssize_t
seq_state_store(struct device
*dev
,
1425 struct device_attribute
*attr
,
1426 const char *buf
, size_t size
)
1429 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1430 struct etmv4_config
*config
= &drvdata
->config
;
1432 if (kstrtoul(buf
, 16, &val
))
1434 if (val
>= drvdata
->nrseqstate
)
1437 config
->seq_state
= val
;
1440 static DEVICE_ATTR_RW(seq_state
);
1442 static ssize_t
seq_event_show(struct device
*dev
,
1443 struct device_attribute
*attr
,
1448 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1449 struct etmv4_config
*config
= &drvdata
->config
;
1451 spin_lock(&drvdata
->spinlock
);
1452 idx
= config
->seq_idx
;
1453 val
= config
->seq_ctrl
[idx
];
1454 spin_unlock(&drvdata
->spinlock
);
1455 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1458 static ssize_t
seq_event_store(struct device
*dev
,
1459 struct device_attribute
*attr
,
1460 const char *buf
, size_t size
)
1464 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1465 struct etmv4_config
*config
= &drvdata
->config
;
1467 if (kstrtoul(buf
, 16, &val
))
1470 spin_lock(&drvdata
->spinlock
);
1471 idx
= config
->seq_idx
;
1472 /* Seq control has two masks B[15:8] F[7:0] */
1473 config
->seq_ctrl
[idx
] = val
& 0xFFFF;
1474 spin_unlock(&drvdata
->spinlock
);
1477 static DEVICE_ATTR_RW(seq_event
);
1479 static ssize_t
seq_reset_event_show(struct device
*dev
,
1480 struct device_attribute
*attr
,
1484 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1485 struct etmv4_config
*config
= &drvdata
->config
;
1487 val
= config
->seq_rst
;
1488 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1491 static ssize_t
seq_reset_event_store(struct device
*dev
,
1492 struct device_attribute
*attr
,
1493 const char *buf
, size_t size
)
1496 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1497 struct etmv4_config
*config
= &drvdata
->config
;
1499 if (kstrtoul(buf
, 16, &val
))
1501 if (!(drvdata
->nrseqstate
))
1504 config
->seq_rst
= val
& ETMv4_EVENT_MASK
;
1507 static DEVICE_ATTR_RW(seq_reset_event
);
1509 static ssize_t
cntr_idx_show(struct device
*dev
,
1510 struct device_attribute
*attr
,
1514 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1515 struct etmv4_config
*config
= &drvdata
->config
;
1517 val
= config
->cntr_idx
;
1518 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1521 static ssize_t
cntr_idx_store(struct device
*dev
,
1522 struct device_attribute
*attr
,
1523 const char *buf
, size_t size
)
1526 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1527 struct etmv4_config
*config
= &drvdata
->config
;
1529 if (kstrtoul(buf
, 16, &val
))
1531 if (val
>= drvdata
->nr_cntr
)
1535 * Use spinlock to ensure index doesn't change while it gets
1536 * dereferenced multiple times within a spinlock block elsewhere.
1538 spin_lock(&drvdata
->spinlock
);
1539 config
->cntr_idx
= val
;
1540 spin_unlock(&drvdata
->spinlock
);
1543 static DEVICE_ATTR_RW(cntr_idx
);
1545 static ssize_t
cntrldvr_show(struct device
*dev
,
1546 struct device_attribute
*attr
,
1551 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1552 struct etmv4_config
*config
= &drvdata
->config
;
1554 spin_lock(&drvdata
->spinlock
);
1555 idx
= config
->cntr_idx
;
1556 val
= config
->cntrldvr
[idx
];
1557 spin_unlock(&drvdata
->spinlock
);
1558 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1561 static ssize_t
cntrldvr_store(struct device
*dev
,
1562 struct device_attribute
*attr
,
1563 const char *buf
, size_t size
)
1567 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1568 struct etmv4_config
*config
= &drvdata
->config
;
1570 if (kstrtoul(buf
, 16, &val
))
1572 if (val
> ETM_CNTR_MAX_VAL
)
1575 spin_lock(&drvdata
->spinlock
);
1576 idx
= config
->cntr_idx
;
1577 config
->cntrldvr
[idx
] = val
;
1578 spin_unlock(&drvdata
->spinlock
);
1581 static DEVICE_ATTR_RW(cntrldvr
);
1583 static ssize_t
cntr_val_show(struct device
*dev
,
1584 struct device_attribute
*attr
,
1589 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1590 struct etmv4_config
*config
= &drvdata
->config
;
1592 spin_lock(&drvdata
->spinlock
);
1593 idx
= config
->cntr_idx
;
1594 val
= config
->cntr_val
[idx
];
1595 spin_unlock(&drvdata
->spinlock
);
1596 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1599 static ssize_t
cntr_val_store(struct device
*dev
,
1600 struct device_attribute
*attr
,
1601 const char *buf
, size_t size
)
1605 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1606 struct etmv4_config
*config
= &drvdata
->config
;
1608 if (kstrtoul(buf
, 16, &val
))
1610 if (val
> ETM_CNTR_MAX_VAL
)
1613 spin_lock(&drvdata
->spinlock
);
1614 idx
= config
->cntr_idx
;
1615 config
->cntr_val
[idx
] = val
;
1616 spin_unlock(&drvdata
->spinlock
);
1619 static DEVICE_ATTR_RW(cntr_val
);
1621 static ssize_t
cntr_ctrl_show(struct device
*dev
,
1622 struct device_attribute
*attr
,
1627 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1628 struct etmv4_config
*config
= &drvdata
->config
;
1630 spin_lock(&drvdata
->spinlock
);
1631 idx
= config
->cntr_idx
;
1632 val
= config
->cntr_ctrl
[idx
];
1633 spin_unlock(&drvdata
->spinlock
);
1634 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1637 static ssize_t
cntr_ctrl_store(struct device
*dev
,
1638 struct device_attribute
*attr
,
1639 const char *buf
, size_t size
)
1643 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1644 struct etmv4_config
*config
= &drvdata
->config
;
1646 if (kstrtoul(buf
, 16, &val
))
1649 spin_lock(&drvdata
->spinlock
);
1650 idx
= config
->cntr_idx
;
1651 config
->cntr_ctrl
[idx
] = val
;
1652 spin_unlock(&drvdata
->spinlock
);
1655 static DEVICE_ATTR_RW(cntr_ctrl
);
1657 static ssize_t
res_idx_show(struct device
*dev
,
1658 struct device_attribute
*attr
,
1662 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1663 struct etmv4_config
*config
= &drvdata
->config
;
1665 val
= config
->res_idx
;
1666 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1669 static ssize_t
res_idx_store(struct device
*dev
,
1670 struct device_attribute
*attr
,
1671 const char *buf
, size_t size
)
1674 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1675 struct etmv4_config
*config
= &drvdata
->config
;
1677 if (kstrtoul(buf
, 16, &val
))
1680 * Resource selector pair 0 is always implemented and reserved,
1681 * namely an idx with 0 and 1 is illegal.
1683 if ((val
< 2) || (val
>= 2 * drvdata
->nr_resource
))
1687 * Use spinlock to ensure index doesn't change while it gets
1688 * dereferenced multiple times within a spinlock block elsewhere.
1690 spin_lock(&drvdata
->spinlock
);
1691 config
->res_idx
= val
;
1692 spin_unlock(&drvdata
->spinlock
);
1695 static DEVICE_ATTR_RW(res_idx
);
1697 static ssize_t
res_ctrl_show(struct device
*dev
,
1698 struct device_attribute
*attr
,
1703 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1704 struct etmv4_config
*config
= &drvdata
->config
;
1706 spin_lock(&drvdata
->spinlock
);
1707 idx
= config
->res_idx
;
1708 val
= config
->res_ctrl
[idx
];
1709 spin_unlock(&drvdata
->spinlock
);
1710 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1713 static ssize_t
res_ctrl_store(struct device
*dev
,
1714 struct device_attribute
*attr
,
1715 const char *buf
, size_t size
)
1719 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1720 struct etmv4_config
*config
= &drvdata
->config
;
1722 if (kstrtoul(buf
, 16, &val
))
1725 spin_lock(&drvdata
->spinlock
);
1726 idx
= config
->res_idx
;
1727 /* For odd idx pair inversal bit is RES0 */
1729 /* PAIRINV, bit[21] */
1730 val
&= ~TRCRSCTLRn_PAIRINV
;
1731 config
->res_ctrl
[idx
] = val
& (TRCRSCTLRn_PAIRINV
|
1733 TRCRSCTLRn_GROUP_MASK
|
1734 TRCRSCTLRn_SELECT_MASK
);
1735 spin_unlock(&drvdata
->spinlock
);
1738 static DEVICE_ATTR_RW(res_ctrl
);
1740 static ssize_t
sshot_idx_show(struct device
*dev
,
1741 struct device_attribute
*attr
, char *buf
)
1744 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1745 struct etmv4_config
*config
= &drvdata
->config
;
1747 val
= config
->ss_idx
;
1748 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1751 static ssize_t
sshot_idx_store(struct device
*dev
,
1752 struct device_attribute
*attr
,
1753 const char *buf
, size_t size
)
1756 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1757 struct etmv4_config
*config
= &drvdata
->config
;
1759 if (kstrtoul(buf
, 16, &val
))
1761 if (val
>= drvdata
->nr_ss_cmp
)
1764 spin_lock(&drvdata
->spinlock
);
1765 config
->ss_idx
= val
;
1766 spin_unlock(&drvdata
->spinlock
);
1769 static DEVICE_ATTR_RW(sshot_idx
);
1771 static ssize_t
sshot_ctrl_show(struct device
*dev
,
1772 struct device_attribute
*attr
,
1776 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1777 struct etmv4_config
*config
= &drvdata
->config
;
1779 spin_lock(&drvdata
->spinlock
);
1780 val
= config
->ss_ctrl
[config
->ss_idx
];
1781 spin_unlock(&drvdata
->spinlock
);
1782 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1785 static ssize_t
sshot_ctrl_store(struct device
*dev
,
1786 struct device_attribute
*attr
,
1787 const char *buf
, size_t size
)
1791 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1792 struct etmv4_config
*config
= &drvdata
->config
;
1794 if (kstrtoul(buf
, 16, &val
))
1797 spin_lock(&drvdata
->spinlock
);
1798 idx
= config
->ss_idx
;
1799 config
->ss_ctrl
[idx
] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK
, val
);
1800 /* must clear bit 31 in related status register on programming */
1801 config
->ss_status
[idx
] &= ~TRCSSCSRn_STATUS
;
1802 spin_unlock(&drvdata
->spinlock
);
1805 static DEVICE_ATTR_RW(sshot_ctrl
);
1807 static ssize_t
sshot_status_show(struct device
*dev
,
1808 struct device_attribute
*attr
, char *buf
)
1811 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1812 struct etmv4_config
*config
= &drvdata
->config
;
1814 spin_lock(&drvdata
->spinlock
);
1815 val
= config
->ss_status
[config
->ss_idx
];
1816 spin_unlock(&drvdata
->spinlock
);
1817 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1819 static DEVICE_ATTR_RO(sshot_status
);
1821 static ssize_t
sshot_pe_ctrl_show(struct device
*dev
,
1822 struct device_attribute
*attr
,
1826 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1827 struct etmv4_config
*config
= &drvdata
->config
;
1829 spin_lock(&drvdata
->spinlock
);
1830 val
= config
->ss_pe_cmp
[config
->ss_idx
];
1831 spin_unlock(&drvdata
->spinlock
);
1832 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1835 static ssize_t
sshot_pe_ctrl_store(struct device
*dev
,
1836 struct device_attribute
*attr
,
1837 const char *buf
, size_t size
)
1841 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1842 struct etmv4_config
*config
= &drvdata
->config
;
1844 if (kstrtoul(buf
, 16, &val
))
1847 spin_lock(&drvdata
->spinlock
);
1848 idx
= config
->ss_idx
;
1849 config
->ss_pe_cmp
[idx
] = FIELD_PREP(TRCSSPCICRn_PC_MASK
, val
);
1850 /* must clear bit 31 in related status register on programming */
1851 config
->ss_status
[idx
] &= ~TRCSSCSRn_STATUS
;
1852 spin_unlock(&drvdata
->spinlock
);
1855 static DEVICE_ATTR_RW(sshot_pe_ctrl
);
1857 static ssize_t
ctxid_idx_show(struct device
*dev
,
1858 struct device_attribute
*attr
,
1862 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1863 struct etmv4_config
*config
= &drvdata
->config
;
1865 val
= config
->ctxid_idx
;
1866 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1869 static ssize_t
ctxid_idx_store(struct device
*dev
,
1870 struct device_attribute
*attr
,
1871 const char *buf
, size_t size
)
1874 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1875 struct etmv4_config
*config
= &drvdata
->config
;
1877 if (kstrtoul(buf
, 16, &val
))
1879 if (val
>= drvdata
->numcidc
)
1883 * Use spinlock to ensure index doesn't change while it gets
1884 * dereferenced multiple times within a spinlock block elsewhere.
1886 spin_lock(&drvdata
->spinlock
);
1887 config
->ctxid_idx
= val
;
1888 spin_unlock(&drvdata
->spinlock
);
1891 static DEVICE_ATTR_RW(ctxid_idx
);
1893 static ssize_t
ctxid_pid_show(struct device
*dev
,
1894 struct device_attribute
*attr
,
1899 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1900 struct etmv4_config
*config
= &drvdata
->config
;
1903 * Don't use contextID tracing if coming from a PID namespace. See
1904 * comment in ctxid_pid_store().
1906 if (task_active_pid_ns(current
) != &init_pid_ns
)
1909 spin_lock(&drvdata
->spinlock
);
1910 idx
= config
->ctxid_idx
;
1911 val
= (unsigned long)config
->ctxid_pid
[idx
];
1912 spin_unlock(&drvdata
->spinlock
);
1913 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
1916 static ssize_t
ctxid_pid_store(struct device
*dev
,
1917 struct device_attribute
*attr
,
1918 const char *buf
, size_t size
)
1922 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1923 struct etmv4_config
*config
= &drvdata
->config
;
1926 * When contextID tracing is enabled the tracers will insert the
1927 * value found in the contextID register in the trace stream. But if
1928 * a process is in a namespace the PID of that process as seen from the
1929 * namespace won't be what the kernel sees, something that makes the
1930 * feature confusing and can potentially leak kernel only information.
1931 * As such refuse to use the feature if @current is not in the initial
1934 if (task_active_pid_ns(current
) != &init_pid_ns
)
1938 * only implemented when ctxid tracing is enabled, i.e. at least one
1939 * ctxid comparator is implemented and ctxid is greater than 0 bits
1942 if (!drvdata
->ctxid_size
|| !drvdata
->numcidc
)
1944 if (kstrtoul(buf
, 16, &pid
))
1947 spin_lock(&drvdata
->spinlock
);
1948 idx
= config
->ctxid_idx
;
1949 config
->ctxid_pid
[idx
] = (u64
)pid
;
1950 spin_unlock(&drvdata
->spinlock
);
1953 static DEVICE_ATTR_RW(ctxid_pid
);
1955 static ssize_t
ctxid_masks_show(struct device
*dev
,
1956 struct device_attribute
*attr
,
1959 unsigned long val1
, val2
;
1960 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1961 struct etmv4_config
*config
= &drvdata
->config
;
1964 * Don't use contextID tracing if coming from a PID namespace. See
1965 * comment in ctxid_pid_store().
1967 if (task_active_pid_ns(current
) != &init_pid_ns
)
1970 spin_lock(&drvdata
->spinlock
);
1971 val1
= config
->ctxid_mask0
;
1972 val2
= config
->ctxid_mask1
;
1973 spin_unlock(&drvdata
->spinlock
);
1974 return scnprintf(buf
, PAGE_SIZE
, "%#lx %#lx\n", val1
, val2
);
1977 static ssize_t
ctxid_masks_store(struct device
*dev
,
1978 struct device_attribute
*attr
,
1979 const char *buf
, size_t size
)
1982 unsigned long val1
, val2
, mask
;
1983 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
1984 struct etmv4_config
*config
= &drvdata
->config
;
1988 * Don't use contextID tracing if coming from a PID namespace. See
1989 * comment in ctxid_pid_store().
1991 if (task_active_pid_ns(current
) != &init_pid_ns
)
1995 * only implemented when ctxid tracing is enabled, i.e. at least one
1996 * ctxid comparator is implemented and ctxid is greater than 0 bits
1999 if (!drvdata
->ctxid_size
|| !drvdata
->numcidc
)
2001 /* one mask if <= 4 comparators, two for up to 8 */
2002 nr_inputs
= sscanf(buf
, "%lx %lx", &val1
, &val2
);
2003 if ((drvdata
->numcidc
> 4) && (nr_inputs
!= 2))
2006 spin_lock(&drvdata
->spinlock
);
2008 * each byte[0..3] controls mask value applied to ctxid
2011 switch (drvdata
->numcidc
) {
2013 /* COMP0, bits[7:0] */
2014 config
->ctxid_mask0
= val1
& 0xFF;
2017 /* COMP1, bits[15:8] */
2018 config
->ctxid_mask0
= val1
& 0xFFFF;
2021 /* COMP2, bits[23:16] */
2022 config
->ctxid_mask0
= val1
& 0xFFFFFF;
2025 /* COMP3, bits[31:24] */
2026 config
->ctxid_mask0
= val1
;
2029 /* COMP4, bits[7:0] */
2030 config
->ctxid_mask0
= val1
;
2031 config
->ctxid_mask1
= val2
& 0xFF;
2034 /* COMP5, bits[15:8] */
2035 config
->ctxid_mask0
= val1
;
2036 config
->ctxid_mask1
= val2
& 0xFFFF;
2039 /* COMP6, bits[23:16] */
2040 config
->ctxid_mask0
= val1
;
2041 config
->ctxid_mask1
= val2
& 0xFFFFFF;
2044 /* COMP7, bits[31:24] */
2045 config
->ctxid_mask0
= val1
;
2046 config
->ctxid_mask1
= val2
;
2052 * If software sets a mask bit to 1, it must program relevant byte
2053 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2054 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2055 * of ctxid comparator0 value (corresponding to byte 0) register.
2057 mask
= config
->ctxid_mask0
;
2058 for (i
= 0; i
< drvdata
->numcidc
; i
++) {
2059 /* mask value of corresponding ctxid comparator */
2060 maskbyte
= mask
& ETMv4_EVENT_MASK
;
2062 * each bit corresponds to a byte of respective ctxid comparator
2065 for (j
= 0; j
< 8; j
++) {
2067 config
->ctxid_pid
[i
] &= ~(0xFFUL
<< (j
* 8));
2070 /* Select the next ctxid comparator mask value */
2072 /* ctxid comparators[4-7] */
2073 mask
= config
->ctxid_mask1
;
2078 spin_unlock(&drvdata
->spinlock
);
2081 static DEVICE_ATTR_RW(ctxid_masks
);
2083 static ssize_t
vmid_idx_show(struct device
*dev
,
2084 struct device_attribute
*attr
,
2088 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2089 struct etmv4_config
*config
= &drvdata
->config
;
2091 val
= config
->vmid_idx
;
2092 return scnprintf(buf
, PAGE_SIZE
, "%#lx\n", val
);
2095 static ssize_t
vmid_idx_store(struct device
*dev
,
2096 struct device_attribute
*attr
,
2097 const char *buf
, size_t size
)
2100 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2101 struct etmv4_config
*config
= &drvdata
->config
;
2103 if (kstrtoul(buf
, 16, &val
))
2105 if (val
>= drvdata
->numvmidc
)
2109 * Use spinlock to ensure index doesn't change while it gets
2110 * dereferenced multiple times within a spinlock block elsewhere.
2112 spin_lock(&drvdata
->spinlock
);
2113 config
->vmid_idx
= val
;
2114 spin_unlock(&drvdata
->spinlock
);
2117 static DEVICE_ATTR_RW(vmid_idx
);
static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val = (unsigned long)config->vmid_val[config->vmid_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * of vmid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}

		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);
static ssize_t ts_source_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->trfcr) {
		val = -1;
		goto out;
	}

	switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
	case TRFCR_ELx_TS_VIRTUAL:
	case TRFCR_ELx_TS_GUEST_PHYSICAL:
	case TRFCR_ELx_TS_PHYSICAL:
		val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
		break;
	default:
		val = -1;
		break;
	}

out:
	return sysfs_emit(buf, "%d\n", val);
}
static DEVICE_ATTR_RO(ts_source);
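
/*
 * Note (illustration only): the value shown by ts_source is the raw
 * TRFCR_ELx.TS field when it selects virtual, guest physical or physical
 * timestamping, and -1 when no TRFCR value has been recorded or the
 * encoding is not recognised. For example, assuming the architectural
 * encoding behind TRFCR_ELx_TS_VIRTUAL is 1, reading ts_source on a CPU
 * configured for virtual timestamps would print "1".
 */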
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	&dev_attr_ts_source.attr,
	NULL,
};
/*
 * Trace ID allocated dynamically on enable - but also allocate on read
 * in case sysfs or perf read before enable to ensure consistent metadata
 * information for trace decode
 */
static ssize_t trctraceid_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	int trace_id;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	trace_id = etm4_read_alloc_trace_id(drvdata);
	if (trace_id < 0)
		return trace_id;

	return sysfs_emit(buf, "0x%x\n", trace_id);
}
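
/*
 * Usage sketch (illustration only, assuming a typical CoreSight sysfs
 * layout and a hypothetical device named "etm0"; the value shown is
 * made up):
 *
 *	# cat /sys/bus/coresight/devices/etm0/mgmt/trctraceid
 *	0x10
 *
 * Reading the attribute before the ETM is enabled already returns the ID
 * the session will use, so decode metadata stays consistent.
 */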
struct etmv4_reg {
	struct coresight_device *csdev;
	u32 offset;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}

static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
	struct etmv4_reg reg;

	reg.offset = offset;
	reg.csdev = drvdata->csdev;

	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}
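
/*
 * Caller sketch (illustration only): the register show functions below end
 * up doing something along the lines of
 *
 *	u32 val = etmv4_cross_read(drvdata, TRCLSR);
 *
 * Because smp_call_function_single() is invoked with wait == 1, the read on
 * the remote CPU has completed and reg.data is valid before the function
 * returns.
 */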
static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return (u32)(unsigned long)eattr->var;
}
static ssize_t coresight_etm4x_reg_show(struct device *dev,
					struct device_attribute *d_attr,
					char *buf)
{
	u32 val, offset;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	offset = coresight_etm4x_attr_to_offset(d_attr);

	pm_runtime_get_sync(dev->parent);
	val = etmv4_cross_read(drvdata, offset);
	pm_runtime_put_sync(dev->parent);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
static bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
	switch (offset) {
	ETM_COMMON_SYSREG_LIST_CASES
		/*
		 * Common registers to ETE & ETM4x accessible via system
		 * instructions are always implemented.
		 */
		return true;

	ETM4x_ONLY_SYSREG_LIST_CASES
		/*
		 * We only support etm4x and ete. So if the device is not
		 * ETE, it must be ETMv4x.
		 */
		return !etm4x_is_ete(drvdata);

	ETM4x_MMAP_LIST_CASES
		/*
		 * Registers accessible only via memory-mapped registers
		 * must not be accessed via system instructions.
		 * We cannot access the drvdata->csdev here, as this
		 * function is called during the device creation, via
		 * coresight_register() and the csdev is not initialized
		 * until that is done. So rely on the drvdata->base to
		 * detect if we have a memory mapped access.
		 * Also ETE doesn't implement memory mapped access, thus
		 * it is sufficient to check that we are using mmio.
		 */
		return !!drvdata->base;

	ETE_ONLY_SYSREG_LIST_CASES
		return etm4x_is_ete(drvdata);
	}

	return false;
}
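
/*
 * Illustration only: on a hypothetical system-instruction-only ETM or ETE
 * (drvdata->base == NULL), a register offset that falls under
 * ETM4x_MMAP_LIST_CASES makes etm4x_register_implemented() return false,
 * and the is_visible callback below then hides the matching sysfs
 * attribute.
 */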
/*
 * Hide the ETM4x registers that may not be available on the
 * hardware.
 * There are certain management registers unavailable via system
 * instructions. Make those sysfs attributes hidden on such
 * systems.
 */
static umode_t
coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
				     struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct device_attribute *d_attr;
	u32 offset;

	d_attr = container_of(attr, struct device_attribute, attr);
	offset = coresight_etm4x_attr_to_offset(d_attr);

	if (etm4x_register_implemented(drvdata, offset))
		return attr->mode;
	return 0;
}
/*
 * Macro to set an RO ext attribute with offset and show function.
 * Offset is used in mgmt group to ensure only correct registers for
 * the ETM / ETE variant are visible.
 */
#define coresight_etm4x_reg_showfn(name, offset, showfn) (	\
	&((struct dev_ext_attribute[]) {			\
	   {							\
		__ATTR(name, 0444, showfn, NULL),		\
		(void *)(unsigned long)offset			\
	   }							\
	})[0].attr.attr						\
	)

/* macro using the default coresight_etm4x_reg_show function */
#define coresight_etm4x_reg(name, offset)	\
	coresight_etm4x_reg_showfn(name, offset, coresight_etm4x_reg_show)
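
/*
 * Expansion sketch (illustration only): coresight_etm4x_reg(trclsr, TRCLSR)
 * evaluates to the address of the 'struct attribute' embedded in an
 * anonymous dev_ext_attribute, roughly
 *
 *	&((struct dev_ext_attribute[]) {
 *		{
 *			__ATTR(trclsr, 0444, coresight_etm4x_reg_show, NULL),
 *			(void *)(unsigned long)TRCLSR
 *		}
 *	})[0].attr.attr
 *
 * which is what lets coresight_etm4x_attr_to_offset() recover the register
 * offset from ->var via container_of().
 */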
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	coresight_etm4x_reg(trcpdcr, TRCPDCR),
	coresight_etm4x_reg(trcpdsr, TRCPDSR),
	coresight_etm4x_reg(trclsr, TRCLSR),
	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
	coresight_etm4x_reg(trcdevid, TRCDEVID),
	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
	coresight_etm4x_reg_showfn(trctraceid, TRCTRACEIDR, trctraceid_show),
	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
	NULL,
};
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	coresight_etm4x_reg(trcidr0, TRCIDR0),
	coresight_etm4x_reg(trcidr1, TRCIDR1),
	coresight_etm4x_reg(trcidr2, TRCIDR2),
	coresight_etm4x_reg(trcidr3, TRCIDR3),
	coresight_etm4x_reg(trcidr4, TRCIDR4),
	coresight_etm4x_reg(trcidr5, TRCIDR5),
	/* trcidr[6,7] are reserved */
	coresight_etm4x_reg(trcidr8, TRCIDR8),
	coresight_etm4x_reg(trcidr9, TRCIDR9),
	coresight_etm4x_reg(trcidr10, TRCIDR10),
	coresight_etm4x_reg(trcidr11, TRCIDR11),
	coresight_etm4x_reg(trcidr12, TRCIDR12),
	coresight_etm4x_reg(trcidr13, TRCIDR13),
	NULL,
};
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.is_visible = coresight_etm4x_attr_reg_implemented,
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,