// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}
static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			  ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp == true) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
	config->vipcssctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	config->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	config->ss_idx = 0x0;
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset);
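
/*
 * Editor's note (illustrative, not part of the upstream driver): the "reset"
 * attribute above is write-only and restores the default configuration held
 * in struct etmv4_config.  A hedged usage sketch, assuming the usual
 * CoreSight sysfs layout under /sys/bus/coresight/devices/ and an ETM node
 * named "etm0" (the node name is hypothetical and platform dependent):
 *
 *   # echo 1 > /sys/bus/coresight/devices/etm0/reset
 *
 * Any value that parses with kstrtoul(buf, 16, &val) is accepted; a non-zero
 * value also clears the cached "mode" bitmask before the defaults are applied.
 */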
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
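
/*
 * Editor's note (illustrative): "mode" is a bitmask filtered by ETMv4_MODE_ALL
 * and mapped by mode_store() above onto the cached TRCCONFIGR, TRCEVENTCTL1R,
 * TRCSTALLCTLR and TRCVICTLR fields.  A hedged sketch, assuming the same
 * hypothetical etm0 sysfs node as before; the bit assignments (ETM_MODE_LOAD,
 * ETM_MODE_STORE, ETM_MODE_BB, ...) are defined in coresight-etm4x.h:
 *
 *   # cat  /sys/bus/coresight/devices/etm0/mode
 *   # echo 0x10 > /sys/bus/coresight/devices/etm0/mode
 *
 * The written value is parsed as hexadecimal; bits outside ETMv4_MODE_ALL are
 * silently discarded.
 */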
static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		config->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		config->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		config->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		config->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);
static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		config->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);

static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	config->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);

static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	config->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);
static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	/* mask off max threshold before checking min value */
	val &= ETM_CYC_THRESHOLD_MASK;
	if (val < drvdata->ccitmin)
		return -EINVAL;

	config->ccctlr = val;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);

static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
	 * individual range comparators. If include then at least 1
	 * range must be selected.
	 */
	if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
		return -EINVAL;

	config->bb_ctrl = val & GENMASK(8, 0);
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);

static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	config->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);
static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits */
	config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits */
	config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;
	spin_lock(&drvdata->spinlock);
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int elements, exclude;

	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);

	/* exclude is optional, but need at least two parameter */
	if (elements < 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE;
	 * use supplied value, or default to bit set in 'mode'
	 */
	if (elements != 3)
		exclude = config->mode & ETM_MODE_EXCLUDE;
	etm4_set_mode_exclude(drvdata, exclude ? true : false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
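
/*
 * Editor's note (illustrative): addr_range_store() expects two or three
 * space-separated hex values: a start address, an end address and an optional
 * include/exclude flag.  A hedged sketch, assuming the hypothetical etm0
 * node; the addresses are placeholders only:
 *
 *   # echo 0 > /sys/bus/coresight/devices/etm0/addr_idx
 *   # echo 0xffff000008080000 0xffff0000080a0000 > \
 *         /sys/bus/coresight/devices/etm0/addr_range
 *
 * The comparator pair selected by addr_idx switches to ETM_ADDR_TYPE_RANGE
 * and etm4_set_mode_exclude() updates the ViewInst include/exclude control.
 */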
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->vissctlr |= BIT(idx);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->vissctlr |= BIT(idx + 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID comparison */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID comparison */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID comparison
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);

static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);
static ssize_t addr_exlevel_s_ns_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 8, 14);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_exlevel_s_ns_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 0, &val))
		return -EINVAL;

	if (val & ~((GENMASK(14, 8) >> 8)))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
	config->addr_acc[idx] &= ~(GENMASK(14, 8));
	config->addr_acc[idx] |= (val << 8);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_exlevel_s_ns);
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};

static ssize_t addr_cmp_view_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u8 idx, addr_type;
	unsigned long addr_v, addr_v2, addr_ctrl;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int size = 0;
	bool exclude = false;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	addr_v = config->addr_val[idx];
	addr_ctrl = config->addr_acc[idx];
	addr_type = config->addr_type[idx];
	if (addr_type == ETM_ADDR_TYPE_RANGE) {
		if (idx & 0x1) {
			idx -= 1;
			addr_v = config->addr_val[idx];
		}
		addr_v2 = config->addr_val[idx + 1];
		exclude = config->viiectlr & BIT(idx / 2 + 16);
	}
	spin_unlock(&drvdata->spinlock);
	if (addr_type) {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
				 addr_type_names[addr_type], addr_v);
		if (addr_type == ETM_ADDR_TYPE_RANGE) {
			size += scnprintf(buf + size, PAGE_SIZE - size,
					  " %#lx %s", addr_v2,
					  exclude ? "exclude" : "include");
		}
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  " ctrl(%#lx)\n", addr_ctrl);
	} else {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
	}
	return size;
}
static DEVICE_ATTR_RO(addr_cmp_view);
static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (!drvdata->nr_pe_cmp)
		return -EINVAL;
	val = config->vipcssctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->nr_pe_cmp)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vipcssctlr = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* Seq control has two masks B[15:8] F[7:0] */
	config->seq_ctrl[idx] = val & 0xFFFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);
static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);
static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val & GENMASK(21, 0);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
static ssize_t sshot_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ss_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_ss_cmp)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->ss_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_idx);

static ssize_t sshot_ctrl_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_ctrl[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_ctrl_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ss_idx;
	config->ss_ctrl[idx] = val & GENMASK(24, 0);
	/* must clear bit 31 in related status register on programming */
	config->ss_status[idx] &= ~BIT(31);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_ctrl);

static ssize_t sshot_status_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_status[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(sshot_status);

static ssize_t sshot_pe_ctrl_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_pe_cmp[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_pe_ctrl_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ss_idx;
	config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
	/* must clear bit 31 in related status register on programming */
	config->ss_status[idx] &= ~BIT(31);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_pe_ctrl);
static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	val = (unsigned long)config->ctxid_pid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream. But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &pid))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * of ctxid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
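
/*
 * Editor's note (illustrative): ctxid_masks takes one hex mask for up to four
 * context ID comparators and a second mask when up to eight are implemented;
 * each byte masks one comparator, and any masked byte also clears the
 * corresponding byte of the comparator value. A hedged sketch with the
 * hypothetical etm0 node:
 *
 *   # echo 0x0 > /sys/bus/coresight/devices/etm0/ctxid_masks
 *
 * Reads and writes are rejected with -EINVAL when the caller is not in the
 * initial PID namespace, mirroring ctxid_pid_store().
 */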
static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * of vmid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	NULL,
};
static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};