// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}
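/*
 * Illustration of the pairing above (editor's sketch, not from the original
 * source): comparators are used in pairs {idx, idx + 1}, so for idx = 2 the
 * include bit is BIT(2 / 2) = BIT(1) and the exclude bit is
 * BIT(2 / 2 + 16) = BIT(17) in the ViewInst include/exclude control value.
 */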
static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			  ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl = BIT(0);
	if (drvdata->nr_addr_cmp > 0) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
	config->vipcssctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	config->res_idx = 0x0;
	for (i = 2; i < 2 * drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	config->ss_idx = 0x0;
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset);
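/*
 * Usage example (editor's sketch, not part of the original file): with the
 * attribute groups registered at the end of this file, writing any hex value
 * to the per-device "reset" node restores the defaults set up above, e.g.:
 *
 *   echo 1 > /sys/bus/coresight/devices/<etm-device>/reset
 *
 * The exact device name under /sys/bus/coresight/devices is platform
 * dependent.
 */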
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);
static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		config->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		config->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		config->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		config->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);
static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		config->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);
static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	config->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);
static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	config->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);
static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	/* mask off max threshold before checking min value */
	val &= ETM_CYC_THRESHOLD_MASK;
	if (val < drvdata->ccitmin)
		return -EINVAL;

	config->ccctlr = val;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
	 * individual range comparators. If include then at least 1
	 * range must be selected.
	 */
	if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
		return -EINVAL;

	config->bb_ctrl = val & GENMASK(8, 0);
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	config->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);
static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits */
	config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits */
	config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);
static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;
	spin_lock(&drvdata->spinlock);
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int elements, exclude;

	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);

	/* exclude is optional, but need at least two parameter */
	if (elements < 2)
		return -EINVAL;

	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 * use supplied value, or default to bit set in 'mode'
	 */
	if (elements != 3)
		exclude = config->mode & ETM_MODE_EXCLUDE;
	etm4_set_mode_exclude(drvdata, exclude ? true : false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
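/*
 * Usage sketch (editor's illustration, not from the original source):
 * addr_range expects two or three hex fields, matching the
 * sscanf(buf, "%lx %lx %x", ...) above - a start address, an end address and
 * an optional include(0)/exclude(1) flag, e.g.:
 *
 *   echo "0x40081000 0x40082000 0" > addr_range
 */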
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->vissctlr |= BIT(idx);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);
static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->vissctlr |= BIT(idx + 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID comparison
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);
static ssize_t addr_exlevel_s_ns_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 8, 14);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_exlevel_s_ns_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 0, &val))
		return -EINVAL;

	if (val & ~((GENMASK(14, 8) >> 8)))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
	config->addr_acc[idx] &= ~(GENMASK(14, 8));
	config->addr_acc[idx] |= (val << 8);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_exlevel_s_ns);
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};

static ssize_t addr_cmp_view_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u8 idx, addr_type;
	unsigned long addr_v, addr_v2, addr_ctrl;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int size = 0;
	bool exclude = false;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	addr_v = config->addr_val[idx];
	addr_ctrl = config->addr_acc[idx];
	addr_type = config->addr_type[idx];
	if (addr_type == ETM_ADDR_TYPE_RANGE) {
		if (idx & 0x1) {
			idx -= 1;
			addr_v = config->addr_val[idx];
		}
		addr_v2 = config->addr_val[idx + 1];
		exclude = config->viiectlr & BIT(idx / 2 + 16);
	}
	spin_unlock(&drvdata->spinlock);
	if (addr_type) {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
				 addr_type_names[addr_type], addr_v);
		if (addr_type == ETM_ADDR_TYPE_RANGE) {
			size += scnprintf(buf + size, PAGE_SIZE - size,
					  " %#lx %s", addr_v2,
					  exclude ? "exclude" : "include");
		}
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  " ctrl(%#lx)\n", addr_ctrl);
	} else {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
	}
	return size;
}
static DEVICE_ATTR_RO(addr_cmp_view);
static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (!drvdata->nr_pe_cmp)
		return -EINVAL;
	val = config->vipcssctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->nr_pe_cmp)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vipcssctlr = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);
static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* Seq control has two masks B[15:8] F[7:0] */
	config->seq_ctrl[idx] = val & 0xFFFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);
static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/*
	 * Resource selector pair 0 is always implemented and reserved,
	 * namely an idx with 0 and 1 is illegal.
	 */
	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val & GENMASK(21, 0);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
static ssize_t sshot_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ss_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_ss_cmp)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->ss_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_idx);

static ssize_t sshot_ctrl_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_ctrl[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_ctrl_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ss_idx;
	config->ss_ctrl[idx] = val & GENMASK(24, 0);
	/* must clear bit 31 in related status register on programming */
	config->ss_status[idx] &= ~BIT(31);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_ctrl);

static ssize_t sshot_status_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_status[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(sshot_status);
static ssize_t sshot_pe_ctrl_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ss_pe_cmp[config->ss_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t sshot_pe_ctrl_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ss_idx;
	config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
	/* must clear bit 31 in related status register on programming */
	config->ss_status[idx] &= ~BIT(31);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(sshot_pe_ctrl);
static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	val = (unsigned long)config->ctxid_pid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream. But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &pid))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * of ctxid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
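/*
 * Worked example of the mask handling above (editor's illustration): with two
 * context ID comparators, writing 0x3 sets ctxid_mask0 = 0x3, i.e. bytes 0
 * and 1 of comparator 0 are masked, so the loop after the switch clears
 * bits[15:0] of ctxid_pid[0] as the architecture requires.
 */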
static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * of vmid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
);
2252 static ssize_t
cpu_show(struct device
*dev
,
2253 struct device_attribute
*attr
, char *buf
)
2256 struct etmv4_drvdata
*drvdata
= dev_get_drvdata(dev
->parent
);
2259 return scnprintf(buf
, PAGE_SIZE
, "%d\n", val
);
2262 static DEVICE_ATTR_RO(cpu
);
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_addr_exlevel_s_ns.attr,
	&dev_attr_addr_cmp_view.attr,
	&dev_attr_vinst_pe_cmp_start_stop.attr,
	&dev_attr_sshot_idx.attr,
	&dev_attr_sshot_ctrl.attr,
	&dev_attr_sshot_pe_ctrl.attr,
	&dev_attr_sshot_status.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

struct etmv4_reg {
	void __iomem *addr;
	u32 data;
};
static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
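/*
 * Resulting sysfs layout (editor's sketch, for reference only): the unnamed
 * group places the configuration attributes directly in the ETM device
 * directory, while the named groups appear as subdirectories, e.g.
 * .../<etm-device>/mgmt/trcpdcr and .../<etm-device>/trcidr/trcidr0.
 */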