Linux 4.16.11
drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1 /*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
18 #include <linux/pm_runtime.h>
19 #include <linux/sysfs.h>
20 #include "coresight-etm4x.h"
21 #include "coresight-priv.h"
23 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
25 u8 idx;
26 struct etmv4_config *config = &drvdata->config;
28 idx = config->addr_idx;
31 * TRCACATRn.TYPE bit[1:0]: type of comparison
32 * the trace unit performs
34 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
35 if (idx % 2 != 0)
36 return -EINVAL;
39 * We are performing instruction address comparison. Set the
40 * relevant bit of ViewInst Include/Exclude Control register
41 * for corresponding address comparator pair.
43 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
44 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
45 return -EINVAL;
47 if (exclude == true) {
49 * Set exclude bit and unset the include bit
50 * corresponding to comparator pair
52 config->viiectlr |= BIT(idx / 2 + 16);
53 config->viiectlr &= ~BIT(idx / 2);
54 } else {
56 * Set include bit and unset exclude bit
57 * corresponding to comparator pair
59 config->viiectlr |= BIT(idx / 2);
60 config->viiectlr &= ~BIT(idx / 2 + 16);
63 return 0;
66 static ssize_t nr_pe_cmp_show(struct device *dev,
67 struct device_attribute *attr,
68 char *buf)
70 unsigned long val;
71 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
73 val = drvdata->nr_pe_cmp;
74 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
76 static DEVICE_ATTR_RO(nr_pe_cmp);
78 static ssize_t nr_addr_cmp_show(struct device *dev,
79 struct device_attribute *attr,
80 char *buf)
82 unsigned long val;
83 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
85 val = drvdata->nr_addr_cmp;
86 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
88 static DEVICE_ATTR_RO(nr_addr_cmp);
90 static ssize_t nr_cntr_show(struct device *dev,
91 struct device_attribute *attr,
92 char *buf)
94 unsigned long val;
95 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
97 val = drvdata->nr_cntr;
98 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
100 static DEVICE_ATTR_RO(nr_cntr);
102 static ssize_t nr_ext_inp_show(struct device *dev,
103 struct device_attribute *attr,
104 char *buf)
106 unsigned long val;
107 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
109 val = drvdata->nr_ext_inp;
110 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
112 static DEVICE_ATTR_RO(nr_ext_inp);
114 static ssize_t numcidc_show(struct device *dev,
115 struct device_attribute *attr,
116 char *buf)
118 unsigned long val;
119 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
121 val = drvdata->numcidc;
122 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
124 static DEVICE_ATTR_RO(numcidc);
126 static ssize_t numvmidc_show(struct device *dev,
127 struct device_attribute *attr,
128 char *buf)
130 unsigned long val;
131 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
133 val = drvdata->numvmidc;
134 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
136 static DEVICE_ATTR_RO(numvmidc);
138 static ssize_t nrseqstate_show(struct device *dev,
139 struct device_attribute *attr,
140 char *buf)
142 unsigned long val;
143 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
145 val = drvdata->nrseqstate;
146 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
148 static DEVICE_ATTR_RO(nrseqstate);
150 static ssize_t nr_resource_show(struct device *dev,
151 struct device_attribute *attr,
152 char *buf)
154 unsigned long val;
155 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
157 val = drvdata->nr_resource;
158 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
160 static DEVICE_ATTR_RO(nr_resource);
162 static ssize_t nr_ss_cmp_show(struct device *dev,
163 struct device_attribute *attr,
164 char *buf)
166 unsigned long val;
167 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
169 val = drvdata->nr_ss_cmp;
170 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
172 static DEVICE_ATTR_RO(nr_ss_cmp);
174 static ssize_t reset_store(struct device *dev,
175 struct device_attribute *attr,
176 const char *buf, size_t size)
178 int i;
179 unsigned long val;
180 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
181 struct etmv4_config *config = &drvdata->config;
183 if (kstrtoul(buf, 16, &val))
184 return -EINVAL;
186 spin_lock(&drvdata->spinlock);
187 if (val)
188 config->mode = 0x0;
190 /* Disable data tracing: do not trace load and store data transfers */
191 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
192 config->cfg &= ~(BIT(1) | BIT(2));
194 /* Disable data value and data address tracing */
195 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
196 ETM_MODE_DATA_TRACE_VAL);
197 config->cfg &= ~(BIT(16) | BIT(17));
199 /* Disable all events tracing */
200 config->eventctrl0 = 0x0;
201 config->eventctrl1 = 0x0;
203 /* Disable timestamp event */
204 config->ts_ctrl = 0x0;
206 /* Disable stalling */
207 config->stall_ctrl = 0x0;
209 /* Reset trace synchronization period to 2^8 = 256 bytes */
210 if (drvdata->syncpr == false)
211 config->syncfreq = 0x8;
214 * Enable ViewInst to trace everything with start-stop logic in
215 * started state. ARM recommends start-stop logic is set before
216 * each trace run.
218 config->vinst_ctrl |= BIT(0);
219 if (drvdata->nr_addr_cmp == true) {
220 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
221 /* SSSTATUS, bit[9] */
222 config->vinst_ctrl |= BIT(9);
225 /* No address range filtering for ViewInst */
226 config->viiectlr = 0x0;
228 /* No start-stop filtering for ViewInst */
229 config->vissctlr = 0x0;
231 /* Disable seq events */
232 for (i = 0; i < drvdata->nrseqstate-1; i++)
233 config->seq_ctrl[i] = 0x0;
234 config->seq_rst = 0x0;
235 config->seq_state = 0x0;
237 /* Disable external input events */
238 config->ext_inp = 0x0;
240 config->cntr_idx = 0x0;
241 for (i = 0; i < drvdata->nr_cntr; i++) {
242 config->cntrldvr[i] = 0x0;
243 config->cntr_ctrl[i] = 0x0;
244 config->cntr_val[i] = 0x0;
247 config->res_idx = 0x0;
248 for (i = 0; i < drvdata->nr_resource; i++)
249 config->res_ctrl[i] = 0x0;
251 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
252 config->ss_ctrl[i] = 0x0;
253 config->ss_pe_cmp[i] = 0x0;
256 config->addr_idx = 0x0;
257 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
258 config->addr_val[i] = 0x0;
259 config->addr_acc[i] = 0x0;
260 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
263 config->ctxid_idx = 0x0;
264 for (i = 0; i < drvdata->numcidc; i++) {
265 config->ctxid_pid[i] = 0x0;
266 config->ctxid_vpid[i] = 0x0;
269 config->ctxid_mask0 = 0x0;
270 config->ctxid_mask1 = 0x0;
272 config->vmid_idx = 0x0;
273 for (i = 0; i < drvdata->numvmidc; i++)
274 config->vmid_val[i] = 0x0;
275 config->vmid_mask0 = 0x0;
276 config->vmid_mask1 = 0x0;
278 drvdata->trcid = drvdata->cpu + 1;
280 spin_unlock(&drvdata->spinlock);
282 return size;
284 static DEVICE_ATTR_WO(reset);
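/*
 * Illustrative use from user space; the device name under
 * /sys/bus/coresight/devices/ is platform dependent, so "<etmN>" below
 * is only an assumption for the example:
 *
 *   echo 1 > /sys/bus/coresight/devices/<etmN>/reset
 *
 * Writing a non-zero value returns the sysfs-driven configuration to
 * the defaults programmed above.
 */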
286 static ssize_t mode_show(struct device *dev,
287 struct device_attribute *attr,
288 char *buf)
290 unsigned long val;
291 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 struct etmv4_config *config = &drvdata->config;
294 val = config->mode;
295 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
298 static ssize_t mode_store(struct device *dev,
299 struct device_attribute *attr,
300 const char *buf, size_t size)
302 unsigned long val, mode;
303 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
304 struct etmv4_config *config = &drvdata->config;
306 if (kstrtoul(buf, 16, &val))
307 return -EINVAL;
309 spin_lock(&drvdata->spinlock);
310 config->mode = val & ETMv4_MODE_ALL;
312 if (config->mode & ETM_MODE_EXCLUDE)
313 etm4_set_mode_exclude(drvdata, true);
314 else
315 etm4_set_mode_exclude(drvdata, false);
317 if (drvdata->instrp0 == true) {
318 /* start by clearing instruction P0 field */
319 config->cfg &= ~(BIT(1) | BIT(2));
320 if (config->mode & ETM_MODE_LOAD)
321 /* 0b01 Trace load instructions as P0 instructions */
322 config->cfg |= BIT(1);
323 if (config->mode & ETM_MODE_STORE)
324 /* 0b10 Trace store instructions as P0 instructions */
325 config->cfg |= BIT(2);
326 if (config->mode & ETM_MODE_LOAD_STORE)
328 * 0b11 Trace load and store instructions
329 * as P0 instructions
331 config->cfg |= BIT(1) | BIT(2);
334 /* bit[3], Branch broadcast mode */
335 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
336 config->cfg |= BIT(3);
337 else
338 config->cfg &= ~BIT(3);
340 /* bit[4], Cycle counting instruction trace bit */
341 if ((config->mode & ETMv4_MODE_CYCACC) &&
342 (drvdata->trccci == true))
343 config->cfg |= BIT(4);
344 else
345 config->cfg &= ~BIT(4);
347 /* bit[6], Context ID tracing bit */
348 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
349 config->cfg |= BIT(6);
350 else
351 config->cfg &= ~BIT(6);
353 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
354 config->cfg |= BIT(7);
355 else
356 config->cfg &= ~BIT(7);
358 /* bits[10:8], Conditional instruction tracing bit */
359 mode = ETM_MODE_COND(config->mode);
360 if (drvdata->trccond == true) {
361 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
362 config->cfg |= mode << 8;
365 /* bit[11], Global timestamp tracing bit */
366 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
367 config->cfg |= BIT(11);
368 else
369 config->cfg &= ~BIT(11);
371 /* bit[12], Return stack enable bit */
372 if ((config->mode & ETM_MODE_RETURNSTACK) &&
373 (drvdata->retstack == true))
374 config->cfg |= BIT(12);
375 else
376 config->cfg &= ~BIT(12);
378 /* bits[14:13], Q element enable field */
379 mode = ETM_MODE_QELEM(config->mode);
380 /* start by clearing QE bits */
381 config->cfg &= ~(BIT(13) | BIT(14));
382 /* if supported, Q elements with instruction counts are enabled */
383 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
384 config->cfg |= BIT(13);
386 * if supported, Q elements with and without instruction
387 * counts are enabled
389 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
390 config->cfg |= BIT(14);
392 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
393 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
394 (drvdata->atbtrig == true))
395 config->eventctrl1 |= BIT(11);
396 else
397 config->eventctrl1 &= ~BIT(11);
399 /* bit[12], Low-power state behavior override bit */
400 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
401 (drvdata->lpoverride == true))
402 config->eventctrl1 |= BIT(12);
403 else
404 config->eventctrl1 &= ~BIT(12);
406 /* bit[8], Instruction stall bit */
407 if (config->mode & ETM_MODE_ISTALL_EN)
408 config->stall_ctrl |= BIT(8);
409 else
410 config->stall_ctrl &= ~BIT(8);
412 /* bit[10], Prioritize instruction trace bit */
413 if (config->mode & ETM_MODE_INSTPRIO)
414 config->stall_ctrl |= BIT(10);
415 else
416 config->stall_ctrl &= ~BIT(10);
418 /* bit[13], Trace overflow prevention bit */
419 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
420 (drvdata->nooverflow == true))
421 config->stall_ctrl |= BIT(13);
422 else
423 config->stall_ctrl &= ~BIT(13);
425 /* bit[9] Start/stop logic control bit */
426 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
427 config->vinst_ctrl |= BIT(9);
428 else
429 config->vinst_ctrl &= ~BIT(9);
431 /* bit[10], Whether a trace unit must trace a Reset exception */
432 if (config->mode & ETM_MODE_TRACE_RESET)
433 config->vinst_ctrl |= BIT(10);
434 else
435 config->vinst_ctrl &= ~BIT(10);
437 /* bit[11], Whether a trace unit must trace a system error exception */
438 if ((config->mode & ETM_MODE_TRACE_ERR) &&
439 (drvdata->trc_error == true))
440 config->vinst_ctrl |= BIT(11);
441 else
442 config->vinst_ctrl &= ~BIT(11);
444 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
445 etm4_config_trace_mode(config);
447 spin_unlock(&drvdata->spinlock);
449 return size;
451 static DEVICE_ATTR_RW(mode);
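/*
 * Illustrative sequence (sysfs path assumed as in the reset example);
 * the value is a hex word whose bits are the ETM_MODE_* / ETMv4_MODE_*
 * flags from coresight-etm4x.h:
 *
 *   cat /sys/bus/coresight/devices/<etmN>/mode
 *   echo <hex_mode> > /sys/bus/coresight/devices/<etmN>/mode
 *
 * Bits requesting features the hardware does not implement (see the
 * drvdata->trcbb, trccci, retstack, etc. checks above) are ignored.
 */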
453 static ssize_t pe_show(struct device *dev,
454 struct device_attribute *attr,
455 char *buf)
457 unsigned long val;
458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
459 struct etmv4_config *config = &drvdata->config;
461 val = config->pe_sel;
462 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
465 static ssize_t pe_store(struct device *dev,
466 struct device_attribute *attr,
467 const char *buf, size_t size)
469 unsigned long val;
470 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
471 struct etmv4_config *config = &drvdata->config;
473 if (kstrtoul(buf, 16, &val))
474 return -EINVAL;
476 spin_lock(&drvdata->spinlock);
477 if (val > drvdata->nr_pe) {
478 spin_unlock(&drvdata->spinlock);
479 return -EINVAL;
482 config->pe_sel = val;
483 spin_unlock(&drvdata->spinlock);
484 return size;
486 static DEVICE_ATTR_RW(pe);
488 static ssize_t event_show(struct device *dev,
489 struct device_attribute *attr,
490 char *buf)
492 unsigned long val;
493 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
494 struct etmv4_config *config = &drvdata->config;
496 val = config->eventctrl0;
497 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
500 static ssize_t event_store(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t size)
504 unsigned long val;
505 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
506 struct etmv4_config *config = &drvdata->config;
508 if (kstrtoul(buf, 16, &val))
509 return -EINVAL;
511 spin_lock(&drvdata->spinlock);
512 switch (drvdata->nr_event) {
513 case 0x0:
514 /* EVENT0, bits[7:0] */
515 config->eventctrl0 = val & 0xFF;
516 break;
517 case 0x1:
518 /* EVENT1, bits[15:8] */
519 config->eventctrl0 = val & 0xFFFF;
520 break;
521 case 0x2:
522 /* EVENT2, bits[23:16] */
523 config->eventctrl0 = val & 0xFFFFFF;
524 break;
525 case 0x3:
526 /* EVENT3, bits[31:24] */
527 config->eventctrl0 = val;
528 break;
529 default:
530 break;
532 spin_unlock(&drvdata->spinlock);
533 return size;
535 static DEVICE_ATTR_RW(event);
537 static ssize_t event_instren_show(struct device *dev,
538 struct device_attribute *attr,
539 char *buf)
541 unsigned long val;
542 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
543 struct etmv4_config *config = &drvdata->config;
545 val = BMVAL(config->eventctrl1, 0, 3);
546 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
549 static ssize_t event_instren_store(struct device *dev,
550 struct device_attribute *attr,
551 const char *buf, size_t size)
553 unsigned long val;
554 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
555 struct etmv4_config *config = &drvdata->config;
557 if (kstrtoul(buf, 16, &val))
558 return -EINVAL;
560 spin_lock(&drvdata->spinlock);
561 /* start by clearing all instruction event enable bits */
562 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
563 switch (drvdata->nr_event) {
564 case 0x0:
565 /* generate Event element for event 1 */
566 config->eventctrl1 |= val & BIT(1);
567 break;
568 case 0x1:
569 /* generate Event element for event 1 and 2 */
570 config->eventctrl1 |= val & (BIT(0) | BIT(1));
571 break;
572 case 0x2:
573 /* generate Event element for event 1, 2 and 3 */
574 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
575 break;
576 case 0x3:
577 /* generate Event element for all 4 events */
578 config->eventctrl1 |= val & 0xF;
579 break;
580 default:
581 break;
583 spin_unlock(&drvdata->spinlock);
584 return size;
586 static DEVICE_ATTR_RW(event_instren);
588 static ssize_t event_ts_show(struct device *dev,
589 struct device_attribute *attr,
590 char *buf)
592 unsigned long val;
593 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 struct etmv4_config *config = &drvdata->config;
596 val = config->ts_ctrl;
597 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
600 static ssize_t event_ts_store(struct device *dev,
601 struct device_attribute *attr,
602 const char *buf, size_t size)
604 unsigned long val;
605 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
606 struct etmv4_config *config = &drvdata->config;
608 if (kstrtoul(buf, 16, &val))
609 return -EINVAL;
610 if (!drvdata->ts_size)
611 return -EINVAL;
613 config->ts_ctrl = val & ETMv4_EVENT_MASK;
614 return size;
616 static DEVICE_ATTR_RW(event_ts);
618 static ssize_t syncfreq_show(struct device *dev,
619 struct device_attribute *attr,
620 char *buf)
622 unsigned long val;
623 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 struct etmv4_config *config = &drvdata->config;
626 val = config->syncfreq;
627 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
630 static ssize_t syncfreq_store(struct device *dev,
631 struct device_attribute *attr,
632 const char *buf, size_t size)
634 unsigned long val;
635 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
636 struct etmv4_config *config = &drvdata->config;
638 if (kstrtoul(buf, 16, &val))
639 return -EINVAL;
640 if (drvdata->syncpr == true)
641 return -EINVAL;
643 config->syncfreq = val & ETMv4_SYNC_MASK;
644 return size;
646 static DEVICE_ATTR_RW(syncfreq);
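/*
 * Illustrative use (path assumed): the value written is the TRCSYNCPR
 * PERIOD encoding, i.e. trace synchronization roughly every 2^val
 * bytes, so 0x8 restores the 256-byte default set by reset_store():
 *
 *   echo 0x8 > /sys/bus/coresight/devices/<etmN>/syncfreq
 *
 * The write is rejected when the implementation has a fixed
 * synchronization period (drvdata->syncpr).
 */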
648 static ssize_t cyc_threshold_show(struct device *dev,
649 struct device_attribute *attr,
650 char *buf)
652 unsigned long val;
653 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 struct etmv4_config *config = &drvdata->config;
656 val = config->ccctlr;
657 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
660 static ssize_t cyc_threshold_store(struct device *dev,
661 struct device_attribute *attr,
662 const char *buf, size_t size)
664 unsigned long val;
665 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
666 struct etmv4_config *config = &drvdata->config;
668 if (kstrtoul(buf, 16, &val))
669 return -EINVAL;
670 if (val < drvdata->ccitmin)
671 return -EINVAL;
673 config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
674 return size;
676 static DEVICE_ATTR_RW(cyc_threshold);
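/*
 * Illustrative use (path and value assumed): the threshold only matters
 * once cycle-accurate tracing is enabled via the mode attribute, and
 * values below the hardware minimum (drvdata->ccitmin) are rejected:
 *
 *   echo 0x10 > /sys/bus/coresight/devices/<etmN>/cyc_threshold
 */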
678 static ssize_t bb_ctrl_show(struct device *dev,
679 struct device_attribute *attr,
680 char *buf)
682 unsigned long val;
683 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 struct etmv4_config *config = &drvdata->config;
686 val = config->bb_ctrl;
687 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
690 static ssize_t bb_ctrl_store(struct device *dev,
691 struct device_attribute *attr,
692 const char *buf, size_t size)
694 unsigned long val;
695 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
696 struct etmv4_config *config = &drvdata->config;
698 if (kstrtoul(buf, 16, &val))
699 return -EINVAL;
700 if (drvdata->trcbb == false)
701 return -EINVAL;
702 if (!drvdata->nr_addr_cmp)
703 return -EINVAL;
705 * Bit[7:0] selects which address range comparator is used for
706 * branch broadcast control.
708 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
709 return -EINVAL;
711 config->bb_ctrl = val;
712 return size;
714 static DEVICE_ATTR_RW(bb_ctrl);
716 static ssize_t event_vinst_show(struct device *dev,
717 struct device_attribute *attr,
718 char *buf)
720 unsigned long val;
721 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
722 struct etmv4_config *config = &drvdata->config;
724 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
725 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
728 static ssize_t event_vinst_store(struct device *dev,
729 struct device_attribute *attr,
730 const char *buf, size_t size)
732 unsigned long val;
733 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
734 struct etmv4_config *config = &drvdata->config;
736 if (kstrtoul(buf, 16, &val))
737 return -EINVAL;
739 spin_lock(&drvdata->spinlock);
740 val &= ETMv4_EVENT_MASK;
741 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
742 config->vinst_ctrl |= val;
743 spin_unlock(&drvdata->spinlock);
744 return size;
746 static DEVICE_ATTR_RW(event_vinst);
748 static ssize_t s_exlevel_vinst_show(struct device *dev,
749 struct device_attribute *attr,
750 char *buf)
752 unsigned long val;
753 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
754 struct etmv4_config *config = &drvdata->config;
756 val = BMVAL(config->vinst_ctrl, 16, 19);
757 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
760 static ssize_t s_exlevel_vinst_store(struct device *dev,
761 struct device_attribute *attr,
762 const char *buf, size_t size)
764 unsigned long val;
765 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
766 struct etmv4_config *config = &drvdata->config;
768 if (kstrtoul(buf, 16, &val))
769 return -EINVAL;
771 spin_lock(&drvdata->spinlock);
772 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
773 config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
774 /* enable instruction tracing for corresponding exception level */
775 val &= drvdata->s_ex_level;
776 config->vinst_ctrl |= (val << 16);
777 spin_unlock(&drvdata->spinlock);
778 return size;
780 static DEVICE_ATTR_RW(s_exlevel_vinst);
782 static ssize_t ns_exlevel_vinst_show(struct device *dev,
783 struct device_attribute *attr,
784 char *buf)
786 unsigned long val;
787 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
788 struct etmv4_config *config = &drvdata->config;
790 /* EXLEVEL_NS, bits[23:20] */
791 val = BMVAL(config->vinst_ctrl, 20, 23);
792 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
795 static ssize_t ns_exlevel_vinst_store(struct device *dev,
796 struct device_attribute *attr,
797 const char *buf, size_t size)
799 unsigned long val;
800 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
801 struct etmv4_config *config = &drvdata->config;
803 if (kstrtoul(buf, 16, &val))
804 return -EINVAL;
806 spin_lock(&drvdata->spinlock);
807 /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
808 config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
809 /* enable instruction tracing for corresponding exception level */
810 val &= drvdata->ns_ex_level;
811 config->vinst_ctrl |= (val << 20);
812 spin_unlock(&drvdata->spinlock);
813 return size;
815 static DEVICE_ATTR_RW(ns_exlevel_vinst);
817 static ssize_t addr_idx_show(struct device *dev,
818 struct device_attribute *attr,
819 char *buf)
821 unsigned long val;
822 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
823 struct etmv4_config *config = &drvdata->config;
825 val = config->addr_idx;
826 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
829 static ssize_t addr_idx_store(struct device *dev,
830 struct device_attribute *attr,
831 const char *buf, size_t size)
833 unsigned long val;
834 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
835 struct etmv4_config *config = &drvdata->config;
837 if (kstrtoul(buf, 16, &val))
838 return -EINVAL;
839 if (val >= drvdata->nr_addr_cmp * 2)
840 return -EINVAL;
843 * Use spinlock to ensure index doesn't change while it gets
844 * dereferenced multiple times within a spinlock block elsewhere.
846 spin_lock(&drvdata->spinlock);
847 config->addr_idx = val;
848 spin_unlock(&drvdata->spinlock);
849 return size;
851 static DEVICE_ATTR_RW(addr_idx);
853 static ssize_t addr_instdatatype_show(struct device *dev,
854 struct device_attribute *attr,
855 char *buf)
857 ssize_t len;
858 u8 val, idx;
859 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
860 struct etmv4_config *config = &drvdata->config;
862 spin_lock(&drvdata->spinlock);
863 idx = config->addr_idx;
864 val = BMVAL(config->addr_acc[idx], 0, 1);
865 len = scnprintf(buf, PAGE_SIZE, "%s\n",
866 val == ETM_INSTR_ADDR ? "instr" :
867 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
868 (val == ETM_DATA_STORE_ADDR ? "data_store" :
869 "data_load_store")));
870 spin_unlock(&drvdata->spinlock);
871 return len;
874 static ssize_t addr_instdatatype_store(struct device *dev,
875 struct device_attribute *attr,
876 const char *buf, size_t size)
878 u8 idx;
879 char str[20] = "";
880 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
881 struct etmv4_config *config = &drvdata->config;
883 if (strlen(buf) >= 20)
884 return -EINVAL;
885 if (sscanf(buf, "%s", str) != 1)
886 return -EINVAL;
888 spin_lock(&drvdata->spinlock);
889 idx = config->addr_idx;
890 if (!strcmp(str, "instr"))
891 /* TYPE, bits[1:0] */
892 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
894 spin_unlock(&drvdata->spinlock);
895 return size;
897 static DEVICE_ATTR_RW(addr_instdatatype);
899 static ssize_t addr_single_show(struct device *dev,
900 struct device_attribute *attr,
901 char *buf)
903 u8 idx;
904 unsigned long val;
905 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
906 struct etmv4_config *config = &drvdata->config;
908 idx = config->addr_idx;
909 spin_lock(&drvdata->spinlock);
910 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
911 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
912 spin_unlock(&drvdata->spinlock);
913 return -EPERM;
915 val = (unsigned long)config->addr_val[idx];
916 spin_unlock(&drvdata->spinlock);
917 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
920 static ssize_t addr_single_store(struct device *dev,
921 struct device_attribute *attr,
922 const char *buf, size_t size)
924 u8 idx;
925 unsigned long val;
926 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
927 struct etmv4_config *config = &drvdata->config;
929 if (kstrtoul(buf, 16, &val))
930 return -EINVAL;
932 spin_lock(&drvdata->spinlock);
933 idx = config->addr_idx;
934 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
935 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
936 spin_unlock(&drvdata->spinlock);
937 return -EPERM;
940 config->addr_val[idx] = (u64)val;
941 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
942 spin_unlock(&drvdata->spinlock);
943 return size;
945 static DEVICE_ATTR_RW(addr_single);
947 static ssize_t addr_range_show(struct device *dev,
948 struct device_attribute *attr,
949 char *buf)
951 u8 idx;
952 unsigned long val1, val2;
953 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
954 struct etmv4_config *config = &drvdata->config;
956 spin_lock(&drvdata->spinlock);
957 idx = config->addr_idx;
958 if (idx % 2 != 0) {
959 spin_unlock(&drvdata->spinlock);
960 return -EPERM;
962 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
963 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
964 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
965 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
966 spin_unlock(&drvdata->spinlock);
967 return -EPERM;
970 val1 = (unsigned long)config->addr_val[idx];
971 val2 = (unsigned long)config->addr_val[idx + 1];
972 spin_unlock(&drvdata->spinlock);
973 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
976 static ssize_t addr_range_store(struct device *dev,
977 struct device_attribute *attr,
978 const char *buf, size_t size)
980 u8 idx;
981 unsigned long val1, val2;
982 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
983 struct etmv4_config *config = &drvdata->config;
985 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
986 return -EINVAL;
987 /* lower address comparator cannot have a higher address value */
988 if (val1 > val2)
989 return -EINVAL;
991 spin_lock(&drvdata->spinlock);
992 idx = config->addr_idx;
993 if (idx % 2 != 0) {
994 spin_unlock(&drvdata->spinlock);
995 return -EPERM;
998 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
999 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1000 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1001 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1002 spin_unlock(&drvdata->spinlock);
1003 return -EPERM;
1006 config->addr_val[idx] = (u64)val1;
1007 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1008 config->addr_val[idx + 1] = (u64)val2;
1009 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1011 * Program include or exclude control bits for vinst or vdata
1012 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1014 if (config->mode & ETM_MODE_EXCLUDE)
1015 etm4_set_mode_exclude(drvdata, true);
1016 else
1017 etm4_set_mode_exclude(drvdata, false);
1019 spin_unlock(&drvdata->spinlock);
1020 return size;
1022 static DEVICE_ATTR_RW(addr_range);
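/*
 * Illustrative programming sequence (path and addresses assumed):
 * address range comparators work as even/odd pairs, so select an even
 * index with addr_idx and then write "start end" as two hex values:
 *
 *   echo 0 > /sys/bus/coresight/devices/<etmN>/addr_idx
 *   echo 0x400000 0x401000 > /sys/bus/coresight/devices/<etmN>/addr_range
 */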
1024 static ssize_t addr_start_show(struct device *dev,
1025 struct device_attribute *attr,
1026 char *buf)
1028 u8 idx;
1029 unsigned long val;
1030 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1031 struct etmv4_config *config = &drvdata->config;
1033 spin_lock(&drvdata->spinlock);
1034 idx = config->addr_idx;
1036 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1037 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1038 spin_unlock(&drvdata->spinlock);
1039 return -EPERM;
1042 val = (unsigned long)config->addr_val[idx];
1043 spin_unlock(&drvdata->spinlock);
1044 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1047 static ssize_t addr_start_store(struct device *dev,
1048 struct device_attribute *attr,
1049 const char *buf, size_t size)
1051 u8 idx;
1052 unsigned long val;
1053 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1054 struct etmv4_config *config = &drvdata->config;
1056 if (kstrtoul(buf, 16, &val))
1057 return -EINVAL;
1059 spin_lock(&drvdata->spinlock);
1060 idx = config->addr_idx;
1061 if (!drvdata->nr_addr_cmp) {
1062 spin_unlock(&drvdata->spinlock);
1063 return -EINVAL;
1065 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1066 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1067 spin_unlock(&drvdata->spinlock);
1068 return -EPERM;
1071 config->addr_val[idx] = (u64)val;
1072 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1073 config->vissctlr |= BIT(idx);
1074 /* SSSTATUS, bit[9] - turn on start/stop logic */
1075 config->vinst_ctrl |= BIT(9);
1076 spin_unlock(&drvdata->spinlock);
1077 return size;
1079 static DEVICE_ATTR_RW(addr_start);
1081 static ssize_t addr_stop_show(struct device *dev,
1082 struct device_attribute *attr,
1083 char *buf)
1085 u8 idx;
1086 unsigned long val;
1087 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1088 struct etmv4_config *config = &drvdata->config;
1090 spin_lock(&drvdata->spinlock);
1091 idx = config->addr_idx;
1093 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1094 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1095 spin_unlock(&drvdata->spinlock);
1096 return -EPERM;
1099 val = (unsigned long)config->addr_val[idx];
1100 spin_unlock(&drvdata->spinlock);
1101 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1104 static ssize_t addr_stop_store(struct device *dev,
1105 struct device_attribute *attr,
1106 const char *buf, size_t size)
1108 u8 idx;
1109 unsigned long val;
1110 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1111 struct etmv4_config *config = &drvdata->config;
1113 if (kstrtoul(buf, 16, &val))
1114 return -EINVAL;
1116 spin_lock(&drvdata->spinlock);
1117 idx = config->addr_idx;
1118 if (!drvdata->nr_addr_cmp) {
1119 spin_unlock(&drvdata->spinlock);
1120 return -EINVAL;
1122 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1123 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1124 spin_unlock(&drvdata->spinlock);
1125 return -EPERM;
1128 config->addr_val[idx] = (u64)val;
1129 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1130 config->vissctlr |= BIT(idx + 16);
1131 /* SSSTATUS, bit[9] - turn on start/stop logic */
1132 config->vinst_ctrl |= BIT(9);
1133 spin_unlock(&drvdata->spinlock);
1134 return size;
1136 static DEVICE_ATTR_RW(addr_stop);
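/*
 * Illustrative start/stop filtering sequence (path and addresses
 * assumed): program one comparator as a start address and another as a
 * stop address; each store also sets the relevant vissctlr bit and
 * turns on the start/stop logic (SSSTATUS):
 *
 *   echo 0 > /sys/bus/coresight/devices/<etmN>/addr_idx
 *   echo 0x400000 > /sys/bus/coresight/devices/<etmN>/addr_start
 *   echo 1 > /sys/bus/coresight/devices/<etmN>/addr_idx
 *   echo 0x401000 > /sys/bus/coresight/devices/<etmN>/addr_stop
 */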
1138 static ssize_t addr_ctxtype_show(struct device *dev,
1139 struct device_attribute *attr,
1140 char *buf)
1142 ssize_t len;
1143 u8 idx, val;
1144 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1145 struct etmv4_config *config = &drvdata->config;
1147 spin_lock(&drvdata->spinlock);
1148 idx = config->addr_idx;
1149 /* CONTEXTTYPE, bits[3:2] */
1150 val = BMVAL(config->addr_acc[idx], 2, 3);
1151 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1152 (val == ETM_CTX_CTXID ? "ctxid" :
1153 (val == ETM_CTX_VMID ? "vmid" : "all")));
1154 spin_unlock(&drvdata->spinlock);
1155 return len;
1158 static ssize_t addr_ctxtype_store(struct device *dev,
1159 struct device_attribute *attr,
1160 const char *buf, size_t size)
1162 u8 idx;
1163 char str[10] = "";
1164 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1165 struct etmv4_config *config = &drvdata->config;
1167 if (strlen(buf) >= 10)
1168 return -EINVAL;
1169 if (sscanf(buf, "%s", str) != 1)
1170 return -EINVAL;
1172 spin_lock(&drvdata->spinlock);
1173 idx = config->addr_idx;
1174 if (!strcmp(str, "none"))
1175 /* start by clearing context type bits */
1176 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1177 else if (!strcmp(str, "ctxid")) {
1178 /* 0b01 The trace unit performs a Context ID comparison */
1179 if (drvdata->numcidc) {
1180 config->addr_acc[idx] |= BIT(2);
1181 config->addr_acc[idx] &= ~BIT(3);
1183 } else if (!strcmp(str, "vmid")) {
1184 /* 0b10 The trace unit performs a VMID comparison */
1185 if (drvdata->numvmidc) {
1186 config->addr_acc[idx] &= ~BIT(2);
1187 config->addr_acc[idx] |= BIT(3);
1189 } else if (!strcmp(str, "all")) {
1191 * 0b11 The trace unit performs a Context ID
1192 * comparison and a VMID comparison
1194 if (drvdata->numcidc)
1195 config->addr_acc[idx] |= BIT(2);
1196 if (drvdata->numvmidc)
1197 config->addr_acc[idx] |= BIT(3);
1199 spin_unlock(&drvdata->spinlock);
1200 return size;
1202 static DEVICE_ATTR_RW(addr_ctxtype);
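/*
 * Illustrative use (path assumed): unlike most attributes here this one
 * takes a keyword rather than a hex value; valid keywords are "none",
 * "ctxid", "vmid" and "all", matching the CONTEXTTYPE encodings above:
 *
 *   echo ctxid > /sys/bus/coresight/devices/<etmN>/addr_ctxtype
 */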
1204 static ssize_t addr_context_show(struct device *dev,
1205 struct device_attribute *attr,
1206 char *buf)
1208 u8 idx;
1209 unsigned long val;
1210 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1211 struct etmv4_config *config = &drvdata->config;
1213 spin_lock(&drvdata->spinlock);
1214 idx = config->addr_idx;
1215 /* context ID comparator bits[6:4] */
1216 val = BMVAL(config->addr_acc[idx], 4, 6);
1217 spin_unlock(&drvdata->spinlock);
1218 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1221 static ssize_t addr_context_store(struct device *dev,
1222 struct device_attribute *attr,
1223 const char *buf, size_t size)
1225 u8 idx;
1226 unsigned long val;
1227 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1228 struct etmv4_config *config = &drvdata->config;
1230 if (kstrtoul(buf, 16, &val))
1231 return -EINVAL;
1232 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1233 return -EINVAL;
1234 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1235 drvdata->numcidc : drvdata->numvmidc))
1236 return -EINVAL;
1238 spin_lock(&drvdata->spinlock);
1239 idx = config->addr_idx;
1240 /* clear context ID comparator bits[6:4] */
1241 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1242 config->addr_acc[idx] |= (val << 4);
1243 spin_unlock(&drvdata->spinlock);
1244 return size;
1246 static DEVICE_ATTR_RW(addr_context);
1248 static ssize_t seq_idx_show(struct device *dev,
1249 struct device_attribute *attr,
1250 char *buf)
1252 unsigned long val;
1253 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1254 struct etmv4_config *config = &drvdata->config;
1256 val = config->seq_idx;
1257 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1260 static ssize_t seq_idx_store(struct device *dev,
1261 struct device_attribute *attr,
1262 const char *buf, size_t size)
1264 unsigned long val;
1265 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1266 struct etmv4_config *config = &drvdata->config;
1268 if (kstrtoul(buf, 16, &val))
1269 return -EINVAL;
1270 if (val >= drvdata->nrseqstate - 1)
1271 return -EINVAL;
1274 * Use spinlock to ensure index doesn't change while it gets
1275 * dereferenced multiple times within a spinlock block elsewhere.
1277 spin_lock(&drvdata->spinlock);
1278 config->seq_idx = val;
1279 spin_unlock(&drvdata->spinlock);
1280 return size;
1282 static DEVICE_ATTR_RW(seq_idx);
1284 static ssize_t seq_state_show(struct device *dev,
1285 struct device_attribute *attr,
1286 char *buf)
1288 unsigned long val;
1289 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1290 struct etmv4_config *config = &drvdata->config;
1292 val = config->seq_state;
1293 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1296 static ssize_t seq_state_store(struct device *dev,
1297 struct device_attribute *attr,
1298 const char *buf, size_t size)
1300 unsigned long val;
1301 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1302 struct etmv4_config *config = &drvdata->config;
1304 if (kstrtoul(buf, 16, &val))
1305 return -EINVAL;
1306 if (val >= drvdata->nrseqstate)
1307 return -EINVAL;
1309 config->seq_state = val;
1310 return size;
1312 static DEVICE_ATTR_RW(seq_state);
1314 static ssize_t seq_event_show(struct device *dev,
1315 struct device_attribute *attr,
1316 char *buf)
1318 u8 idx;
1319 unsigned long val;
1320 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1321 struct etmv4_config *config = &drvdata->config;
1323 spin_lock(&drvdata->spinlock);
1324 idx = config->seq_idx;
1325 val = config->seq_ctrl[idx];
1326 spin_unlock(&drvdata->spinlock);
1327 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1330 static ssize_t seq_event_store(struct device *dev,
1331 struct device_attribute *attr,
1332 const char *buf, size_t size)
1334 u8 idx;
1335 unsigned long val;
1336 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1337 struct etmv4_config *config = &drvdata->config;
1339 if (kstrtoul(buf, 16, &val))
1340 return -EINVAL;
1342 spin_lock(&drvdata->spinlock);
1343 idx = config->seq_idx;
1344 /* RST, bits[7:0] */
1345 config->seq_ctrl[idx] = val & 0xFF;
1346 spin_unlock(&drvdata->spinlock);
1347 return size;
1349 static DEVICE_ATTR_RW(seq_event);
1351 static ssize_t seq_reset_event_show(struct device *dev,
1352 struct device_attribute *attr,
1353 char *buf)
1355 unsigned long val;
1356 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1357 struct etmv4_config *config = &drvdata->config;
1359 val = config->seq_rst;
1360 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1363 static ssize_t seq_reset_event_store(struct device *dev,
1364 struct device_attribute *attr,
1365 const char *buf, size_t size)
1367 unsigned long val;
1368 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1369 struct etmv4_config *config = &drvdata->config;
1371 if (kstrtoul(buf, 16, &val))
1372 return -EINVAL;
1373 if (!(drvdata->nrseqstate))
1374 return -EINVAL;
1376 config->seq_rst = val & ETMv4_EVENT_MASK;
1377 return size;
1379 static DEVICE_ATTR_RW(seq_reset_event);
1381 static ssize_t cntr_idx_show(struct device *dev,
1382 struct device_attribute *attr,
1383 char *buf)
1385 unsigned long val;
1386 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1387 struct etmv4_config *config = &drvdata->config;
1389 val = config->cntr_idx;
1390 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1393 static ssize_t cntr_idx_store(struct device *dev,
1394 struct device_attribute *attr,
1395 const char *buf, size_t size)
1397 unsigned long val;
1398 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1399 struct etmv4_config *config = &drvdata->config;
1401 if (kstrtoul(buf, 16, &val))
1402 return -EINVAL;
1403 if (val >= drvdata->nr_cntr)
1404 return -EINVAL;
1407 * Use spinlock to ensure index doesn't change while it gets
1408 * dereferenced multiple times within a spinlock block elsewhere.
1410 spin_lock(&drvdata->spinlock);
1411 config->cntr_idx = val;
1412 spin_unlock(&drvdata->spinlock);
1413 return size;
1415 static DEVICE_ATTR_RW(cntr_idx);
1417 static ssize_t cntrldvr_show(struct device *dev,
1418 struct device_attribute *attr,
1419 char *buf)
1421 u8 idx;
1422 unsigned long val;
1423 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1424 struct etmv4_config *config = &drvdata->config;
1426 spin_lock(&drvdata->spinlock);
1427 idx = config->cntr_idx;
1428 val = config->cntrldvr[idx];
1429 spin_unlock(&drvdata->spinlock);
1430 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1433 static ssize_t cntrldvr_store(struct device *dev,
1434 struct device_attribute *attr,
1435 const char *buf, size_t size)
1437 u8 idx;
1438 unsigned long val;
1439 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1440 struct etmv4_config *config = &drvdata->config;
1442 if (kstrtoul(buf, 16, &val))
1443 return -EINVAL;
1444 if (val > ETM_CNTR_MAX_VAL)
1445 return -EINVAL;
1447 spin_lock(&drvdata->spinlock);
1448 idx = config->cntr_idx;
1449 config->cntrldvr[idx] = val;
1450 spin_unlock(&drvdata->spinlock);
1451 return size;
1453 static DEVICE_ATTR_RW(cntrldvr);
1455 static ssize_t cntr_val_show(struct device *dev,
1456 struct device_attribute *attr,
1457 char *buf)
1459 u8 idx;
1460 unsigned long val;
1461 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1462 struct etmv4_config *config = &drvdata->config;
1464 spin_lock(&drvdata->spinlock);
1465 idx = config->cntr_idx;
1466 val = config->cntr_val[idx];
1467 spin_unlock(&drvdata->spinlock);
1468 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1471 static ssize_t cntr_val_store(struct device *dev,
1472 struct device_attribute *attr,
1473 const char *buf, size_t size)
1475 u8 idx;
1476 unsigned long val;
1477 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1478 struct etmv4_config *config = &drvdata->config;
1480 if (kstrtoul(buf, 16, &val))
1481 return -EINVAL;
1482 if (val > ETM_CNTR_MAX_VAL)
1483 return -EINVAL;
1485 spin_lock(&drvdata->spinlock);
1486 idx = config->cntr_idx;
1487 config->cntr_val[idx] = val;
1488 spin_unlock(&drvdata->spinlock);
1489 return size;
1491 static DEVICE_ATTR_RW(cntr_val);
1493 static ssize_t cntr_ctrl_show(struct device *dev,
1494 struct device_attribute *attr,
1495 char *buf)
1497 u8 idx;
1498 unsigned long val;
1499 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1500 struct etmv4_config *config = &drvdata->config;
1502 spin_lock(&drvdata->spinlock);
1503 idx = config->cntr_idx;
1504 val = config->cntr_ctrl[idx];
1505 spin_unlock(&drvdata->spinlock);
1506 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1509 static ssize_t cntr_ctrl_store(struct device *dev,
1510 struct device_attribute *attr,
1511 const char *buf, size_t size)
1513 u8 idx;
1514 unsigned long val;
1515 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1516 struct etmv4_config *config = &drvdata->config;
1518 if (kstrtoul(buf, 16, &val))
1519 return -EINVAL;
1521 spin_lock(&drvdata->spinlock);
1522 idx = config->cntr_idx;
1523 config->cntr_ctrl[idx] = val;
1524 spin_unlock(&drvdata->spinlock);
1525 return size;
1527 static DEVICE_ATTR_RW(cntr_ctrl);
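/*
 * Illustrative counter setup (path and values assumed): pick a counter
 * with cntr_idx, then program its reload value, initial value and
 * control register, all as hex:
 *
 *   echo 0 > /sys/bus/coresight/devices/<etmN>/cntr_idx
 *   echo 0x64 > /sys/bus/coresight/devices/<etmN>/cntrldvr
 *   echo 0x64 > /sys/bus/coresight/devices/<etmN>/cntr_val
 *   echo <hex_ctrl> > /sys/bus/coresight/devices/<etmN>/cntr_ctrl
 */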
1529 static ssize_t res_idx_show(struct device *dev,
1530 struct device_attribute *attr,
1531 char *buf)
1533 unsigned long val;
1534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1535 struct etmv4_config *config = &drvdata->config;
1537 val = config->res_idx;
1538 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1541 static ssize_t res_idx_store(struct device *dev,
1542 struct device_attribute *attr,
1543 const char *buf, size_t size)
1545 unsigned long val;
1546 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1547 struct etmv4_config *config = &drvdata->config;
1549 if (kstrtoul(buf, 16, &val))
1550 return -EINVAL;
1551 /* Resource selector pair 0 is always implemented and reserved */
1552 if ((val == 0) || (val >= drvdata->nr_resource))
1553 return -EINVAL;
1556 * Use spinlock to ensure index doesn't change while it gets
1557 * dereferenced multiple times within a spinlock block elsewhere.
1559 spin_lock(&drvdata->spinlock);
1560 config->res_idx = val;
1561 spin_unlock(&drvdata->spinlock);
1562 return size;
1564 static DEVICE_ATTR_RW(res_idx);
1566 static ssize_t res_ctrl_show(struct device *dev,
1567 struct device_attribute *attr,
1568 char *buf)
1570 u8 idx;
1571 unsigned long val;
1572 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1573 struct etmv4_config *config = &drvdata->config;
1575 spin_lock(&drvdata->spinlock);
1576 idx = config->res_idx;
1577 val = config->res_ctrl[idx];
1578 spin_unlock(&drvdata->spinlock);
1579 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1582 static ssize_t res_ctrl_store(struct device *dev,
1583 struct device_attribute *attr,
1584 const char *buf, size_t size)
1586 u8 idx;
1587 unsigned long val;
1588 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1589 struct etmv4_config *config = &drvdata->config;
1591 if (kstrtoul(buf, 16, &val))
1592 return -EINVAL;
1594 spin_lock(&drvdata->spinlock);
1595 idx = config->res_idx;
1596 /* For an odd idx the pair inversion bit is RES0 */
1597 if (idx % 2 != 0)
1598 /* PAIRINV, bit[21] */
1599 val &= ~BIT(21);
1600 config->res_ctrl[idx] = val;
1601 spin_unlock(&drvdata->spinlock);
1602 return size;
1604 static DEVICE_ATTR_RW(res_ctrl);
1606 static ssize_t ctxid_idx_show(struct device *dev,
1607 struct device_attribute *attr,
1608 char *buf)
1610 unsigned long val;
1611 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1612 struct etmv4_config *config = &drvdata->config;
1614 val = config->ctxid_idx;
1615 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1618 static ssize_t ctxid_idx_store(struct device *dev,
1619 struct device_attribute *attr,
1620 const char *buf, size_t size)
1622 unsigned long val;
1623 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1624 struct etmv4_config *config = &drvdata->config;
1626 if (kstrtoul(buf, 16, &val))
1627 return -EINVAL;
1628 if (val >= drvdata->numcidc)
1629 return -EINVAL;
1632 * Use spinlock to ensure index doesn't change while it gets
1633 * dereferenced multiple times within a spinlock block elsewhere.
1635 spin_lock(&drvdata->spinlock);
1636 config->ctxid_idx = val;
1637 spin_unlock(&drvdata->spinlock);
1638 return size;
1640 static DEVICE_ATTR_RW(ctxid_idx);
1642 static ssize_t ctxid_pid_show(struct device *dev,
1643 struct device_attribute *attr,
1644 char *buf)
1646 u8 idx;
1647 unsigned long val;
1648 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1649 struct etmv4_config *config = &drvdata->config;
1651 spin_lock(&drvdata->spinlock);
1652 idx = config->ctxid_idx;
1653 val = (unsigned long)config->ctxid_vpid[idx];
1654 spin_unlock(&drvdata->spinlock);
1655 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1658 static ssize_t ctxid_pid_store(struct device *dev,
1659 struct device_attribute *attr,
1660 const char *buf, size_t size)
1662 u8 idx;
1663 unsigned long vpid, pid;
1664 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1665 struct etmv4_config *config = &drvdata->config;
1668 * only implemented when ctxid tracing is enabled, i.e. at least one
1669 * ctxid comparator is implemented and ctxid is greater than 0 bits
1670 * in length
1672 if (!drvdata->ctxid_size || !drvdata->numcidc)
1673 return -EINVAL;
1674 if (kstrtoul(buf, 16, &vpid))
1675 return -EINVAL;
1677 pid = coresight_vpid_to_pid(vpid);
1679 spin_lock(&drvdata->spinlock);
1680 idx = config->ctxid_idx;
1681 config->ctxid_pid[idx] = (u64)pid;
1682 config->ctxid_vpid[idx] = (u64)vpid;
1683 spin_unlock(&drvdata->spinlock);
1684 return size;
1686 static DEVICE_ATTR_RW(ctxid_pid);
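/*
 * Illustrative use (path and PID value assumed): the value is parsed as
 * hex and treated as a PID in the caller's namespace, which
 * coresight_vpid_to_pid() translates to the kernel PID before it is
 * written to the selected context ID comparator:
 *
 *   echo 0 > /sys/bus/coresight/devices/<etmN>/ctxid_idx
 *   echo 1a2 > /sys/bus/coresight/devices/<etmN>/ctxid_pid
 */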
1688 static ssize_t ctxid_masks_show(struct device *dev,
1689 struct device_attribute *attr,
1690 char *buf)
1692 unsigned long val1, val2;
1693 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1694 struct etmv4_config *config = &drvdata->config;
1696 spin_lock(&drvdata->spinlock);
1697 val1 = config->ctxid_mask0;
1698 val2 = config->ctxid_mask1;
1699 spin_unlock(&drvdata->spinlock);
1700 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1703 static ssize_t ctxid_masks_store(struct device *dev,
1704 struct device_attribute *attr,
1705 const char *buf, size_t size)
1707 u8 i, j, maskbyte;
1708 unsigned long val1, val2, mask;
1709 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1710 struct etmv4_config *config = &drvdata->config;
1713 * only implemented when ctxid tracing is enabled, i.e. at least one
1714 * ctxid comparator is implemented and ctxid is greater than 0 bits
1715 * in length
1717 if (!drvdata->ctxid_size || !drvdata->numcidc)
1718 return -EINVAL;
1719 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1720 return -EINVAL;
1722 spin_lock(&drvdata->spinlock);
1724 * each byte[0..3] controls mask value applied to ctxid
1725 * comparator[0..3]
1727 switch (drvdata->numcidc) {
1728 case 0x1:
1729 /* COMP0, bits[7:0] */
1730 config->ctxid_mask0 = val1 & 0xFF;
1731 break;
1732 case 0x2:
1733 /* COMP1, bits[15:8] */
1734 config->ctxid_mask0 = val1 & 0xFFFF;
1735 break;
1736 case 0x3:
1737 /* COMP2, bits[23:16] */
1738 config->ctxid_mask0 = val1 & 0xFFFFFF;
1739 break;
1740 case 0x4:
1741 /* COMP3, bits[31:24] */
1742 config->ctxid_mask0 = val1;
1743 break;
1744 case 0x5:
1745 /* COMP4, bits[7:0] */
1746 config->ctxid_mask0 = val1;
1747 config->ctxid_mask1 = val2 & 0xFF;
1748 break;
1749 case 0x6:
1750 /* COMP5, bits[15:8] */
1751 config->ctxid_mask0 = val1;
1752 config->ctxid_mask1 = val2 & 0xFFFF;
1753 break;
1754 case 0x7:
1755 /* COMP6, bits[23:16] */
1756 config->ctxid_mask0 = val1;
1757 config->ctxid_mask1 = val2 & 0xFFFFFF;
1758 break;
1759 case 0x8:
1760 /* COMP7, bits[31:24] */
1761 config->ctxid_mask0 = val1;
1762 config->ctxid_mask1 = val2;
1763 break;
1764 default:
1765 break;
1768 * If software sets a mask bit to 1, it must program the relevant byte
1769 * of the ctxid comparator value to 0x0, otherwise the behavior is
1770 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
1771 * clear bits[31:24] (byte 3) of the ctxid comparator 0 value register.
1773 mask = config->ctxid_mask0;
1774 for (i = 0; i < drvdata->numcidc; i++) {
1775 /* mask value of corresponding ctxid comparator */
1776 maskbyte = mask & ETMv4_EVENT_MASK;
1778 * each bit corresponds to a byte of respective ctxid comparator
1779 * value register
1781 for (j = 0; j < 8; j++) {
1782 if (maskbyte & 1)
1783 config->ctxid_pid[i] &= ~(0xFF << (j * 8));
1784 maskbyte >>= 1;
1786 /* Select the next ctxid comparator mask value */
1787 if (i == 3)
1788 /* ctxid comparators[4-7] */
1789 mask = config->ctxid_mask1;
1790 else
1791 mask >>= 0x8;
1794 spin_unlock(&drvdata->spinlock);
1795 return size;
1797 static DEVICE_ATTR_RW(ctxid_masks);
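/*
 * Illustrative use (path assumed): two hex words are expected, mask0
 * for context ID comparators 0-3 and mask1 for comparators 4-7, one
 * mask byte per comparator; each bit of a mask byte masks one byte of
 * that comparator's value, and the store clears the masked value bytes
 * as required above:
 *
 *   echo 0xff 0x0 > /sys/bus/coresight/devices/<etmN>/ctxid_masks
 */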
1799 static ssize_t vmid_idx_show(struct device *dev,
1800 struct device_attribute *attr,
1801 char *buf)
1803 unsigned long val;
1804 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1805 struct etmv4_config *config = &drvdata->config;
1807 val = config->vmid_idx;
1808 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1811 static ssize_t vmid_idx_store(struct device *dev,
1812 struct device_attribute *attr,
1813 const char *buf, size_t size)
1815 unsigned long val;
1816 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1817 struct etmv4_config *config = &drvdata->config;
1819 if (kstrtoul(buf, 16, &val))
1820 return -EINVAL;
1821 if (val >= drvdata->numvmidc)
1822 return -EINVAL;
1825 * Use spinlock to ensure index doesn't change while it gets
1826 * dereferenced multiple times within a spinlock block elsewhere.
1828 spin_lock(&drvdata->spinlock);
1829 config->vmid_idx = val;
1830 spin_unlock(&drvdata->spinlock);
1831 return size;
1833 static DEVICE_ATTR_RW(vmid_idx);
1835 static ssize_t vmid_val_show(struct device *dev,
1836 struct device_attribute *attr,
1837 char *buf)
1839 unsigned long val;
1840 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1841 struct etmv4_config *config = &drvdata->config;
1843 val = (unsigned long)config->vmid_val[config->vmid_idx];
1844 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1847 static ssize_t vmid_val_store(struct device *dev,
1848 struct device_attribute *attr,
1849 const char *buf, size_t size)
1851 unsigned long val;
1852 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1853 struct etmv4_config *config = &drvdata->config;
1856 * only implemented when vmid tracing is enabled, i.e. at least one
1857 * vmid comparator is implemented and at least 8 bit vmid size
1859 if (!drvdata->vmid_size || !drvdata->numvmidc)
1860 return -EINVAL;
1861 if (kstrtoul(buf, 16, &val))
1862 return -EINVAL;
1864 spin_lock(&drvdata->spinlock);
1865 config->vmid_val[config->vmid_idx] = (u64)val;
1866 spin_unlock(&drvdata->spinlock);
1867 return size;
1869 static DEVICE_ATTR_RW(vmid_val);
1871 static ssize_t vmid_masks_show(struct device *dev,
1872 struct device_attribute *attr, char *buf)
1874 unsigned long val1, val2;
1875 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1876 struct etmv4_config *config = &drvdata->config;
1878 spin_lock(&drvdata->spinlock);
1879 val1 = config->vmid_mask0;
1880 val2 = config->vmid_mask1;
1881 spin_unlock(&drvdata->spinlock);
1882 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1885 static ssize_t vmid_masks_store(struct device *dev,
1886 struct device_attribute *attr,
1887 const char *buf, size_t size)
1889 u8 i, j, maskbyte;
1890 unsigned long val1, val2, mask;
1891 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1892 struct etmv4_config *config = &drvdata->config;
1895 * only implemented when vmid tracing is enabled, i.e. at least one
1896 * vmid comparator is implemented and at least 8 bit vmid size
1898 if (!drvdata->vmid_size || !drvdata->numvmidc)
1899 return -EINVAL;
1900 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1901 return -EINVAL;
1903 spin_lock(&drvdata->spinlock);
1906 * each byte[0..3] controls mask value applied to vmid
1907 * comparator[0..3]
1909 switch (drvdata->numvmidc) {
1910 case 0x1:
1911 /* COMP0, bits[7:0] */
1912 config->vmid_mask0 = val1 & 0xFF;
1913 break;
1914 case 0x2:
1915 /* COMP1, bits[15:8] */
1916 config->vmid_mask0 = val1 & 0xFFFF;
1917 break;
1918 case 0x3:
1919 /* COMP2, bits[23:16] */
1920 config->vmid_mask0 = val1 & 0xFFFFFF;
1921 break;
1922 case 0x4:
1923 /* COMP3, bits[31:24] */
1924 config->vmid_mask0 = val1;
1925 break;
1926 case 0x5:
1927 /* COMP4, bits[7:0] */
1928 config->vmid_mask0 = val1;
1929 config->vmid_mask1 = val2 & 0xFF;
1930 break;
1931 case 0x6:
1932 /* COMP5, bits[15:8] */
1933 config->vmid_mask0 = val1;
1934 config->vmid_mask1 = val2 & 0xFFFF;
1935 break;
1936 case 0x7:
1937 /* COMP6, bits[23:16] */
1938 config->vmid_mask0 = val1;
1939 config->vmid_mask1 = val2 & 0xFFFFFF;
1940 break;
1941 case 0x8:
1942 /* COMP7, bits[31:24] */
1943 config->vmid_mask0 = val1;
1944 config->vmid_mask1 = val2;
1945 break;
1946 default:
1947 break;
1951 * If software sets a mask bit to 1, it must program the relevant byte
1952 * of the vmid comparator value to 0x0, otherwise the behavior is
1953 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
1954 * clear bits[31:24] (byte 3) of the vmid comparator 0 value register.
1956 mask = config->vmid_mask0;
1957 for (i = 0; i < drvdata->numvmidc; i++) {
1958 /* mask value of corresponding vmid comparator */
1959 maskbyte = mask & ETMv4_EVENT_MASK;
1961 * each bit corresponds to a byte of respective vmid comparator
1962 * value register
1964 for (j = 0; j < 8; j++) {
1965 if (maskbyte & 1)
1966 config->vmid_val[i] &= ~(0xFF << (j * 8));
1967 maskbyte >>= 1;
1969 /* Select the next vmid comparator mask value */
1970 if (i == 3)
1971 /* vmid comparators[4-7] */
1972 mask = config->vmid_mask1;
1973 else
1974 mask >>= 0x8;
1976 spin_unlock(&drvdata->spinlock);
1977 return size;
1979 static DEVICE_ATTR_RW(vmid_masks);
1981 static ssize_t cpu_show(struct device *dev,
1982 struct device_attribute *attr, char *buf)
1984 int val;
1985 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1987 val = drvdata->cpu;
1988 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
1991 static DEVICE_ATTR_RO(cpu);
1993 static struct attribute *coresight_etmv4_attrs[] = {
1994 &dev_attr_nr_pe_cmp.attr,
1995 &dev_attr_nr_addr_cmp.attr,
1996 &dev_attr_nr_cntr.attr,
1997 &dev_attr_nr_ext_inp.attr,
1998 &dev_attr_numcidc.attr,
1999 &dev_attr_numvmidc.attr,
2000 &dev_attr_nrseqstate.attr,
2001 &dev_attr_nr_resource.attr,
2002 &dev_attr_nr_ss_cmp.attr,
2003 &dev_attr_reset.attr,
2004 &dev_attr_mode.attr,
2005 &dev_attr_pe.attr,
2006 &dev_attr_event.attr,
2007 &dev_attr_event_instren.attr,
2008 &dev_attr_event_ts.attr,
2009 &dev_attr_syncfreq.attr,
2010 &dev_attr_cyc_threshold.attr,
2011 &dev_attr_bb_ctrl.attr,
2012 &dev_attr_event_vinst.attr,
2013 &dev_attr_s_exlevel_vinst.attr,
2014 &dev_attr_ns_exlevel_vinst.attr,
2015 &dev_attr_addr_idx.attr,
2016 &dev_attr_addr_instdatatype.attr,
2017 &dev_attr_addr_single.attr,
2018 &dev_attr_addr_range.attr,
2019 &dev_attr_addr_start.attr,
2020 &dev_attr_addr_stop.attr,
2021 &dev_attr_addr_ctxtype.attr,
2022 &dev_attr_addr_context.attr,
2023 &dev_attr_seq_idx.attr,
2024 &dev_attr_seq_state.attr,
2025 &dev_attr_seq_event.attr,
2026 &dev_attr_seq_reset_event.attr,
2027 &dev_attr_cntr_idx.attr,
2028 &dev_attr_cntrldvr.attr,
2029 &dev_attr_cntr_val.attr,
2030 &dev_attr_cntr_ctrl.attr,
2031 &dev_attr_res_idx.attr,
2032 &dev_attr_res_ctrl.attr,
2033 &dev_attr_ctxid_idx.attr,
2034 &dev_attr_ctxid_pid.attr,
2035 &dev_attr_ctxid_masks.attr,
2036 &dev_attr_vmid_idx.attr,
2037 &dev_attr_vmid_val.attr,
2038 &dev_attr_vmid_masks.attr,
2039 &dev_attr_cpu.attr,
2040 NULL,
2043 struct etmv4_reg {
2044 void __iomem *addr;
2045 u32 data;
2048 static void do_smp_cross_read(void *data)
2050 struct etmv4_reg *reg = data;
2052 reg->data = readl_relaxed(reg->addr);
2055 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2057 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2058 struct etmv4_reg reg;
2060 reg.addr = drvdata->base + offset;
2062 * smp cross call ensures the CPU will be powered up before
2063 * accessing the ETMv4 trace core registers
2065 smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2066 return reg.data;
2069 #define coresight_etm4x_reg(name, offset) \
2070 coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2072 #define coresight_etm4x_cross_read(name, offset) \
2073 coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
2074 name, offset)
2076 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2077 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2078 coresight_etm4x_reg(trclsr, TRCLSR);
2079 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2080 coresight_etm4x_reg(trcdevid, TRCDEVID);
2081 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2082 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2083 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2084 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2085 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2086 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2087 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2088 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2090 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2091 &dev_attr_trcoslsr.attr,
2092 &dev_attr_trcpdcr.attr,
2093 &dev_attr_trcpdsr.attr,
2094 &dev_attr_trclsr.attr,
2095 &dev_attr_trcconfig.attr,
2096 &dev_attr_trctraceid.attr,
2097 &dev_attr_trcauthstatus.attr,
2098 &dev_attr_trcdevid.attr,
2099 &dev_attr_trcdevtype.attr,
2100 &dev_attr_trcpidr0.attr,
2101 &dev_attr_trcpidr1.attr,
2102 &dev_attr_trcpidr2.attr,
2103 &dev_attr_trcpidr3.attr,
2104 NULL,
2107 coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2108 coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2109 coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2110 coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2111 coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2112 coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2113 /* trcidr[6,7] are reserved */
2114 coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2115 coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2116 coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2117 coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2118 coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2119 coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2121 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2122 &dev_attr_trcidr0.attr,
2123 &dev_attr_trcidr1.attr,
2124 &dev_attr_trcidr2.attr,
2125 &dev_attr_trcidr3.attr,
2126 &dev_attr_trcidr4.attr,
2127 &dev_attr_trcidr5.attr,
2128 /* trcidr[6,7] are reserved */
2129 &dev_attr_trcidr8.attr,
2130 &dev_attr_trcidr9.attr,
2131 &dev_attr_trcidr10.attr,
2132 &dev_attr_trcidr11.attr,
2133 &dev_attr_trcidr12.attr,
2134 &dev_attr_trcidr13.attr,
2135 NULL,
2138 static const struct attribute_group coresight_etmv4_group = {
2139 .attrs = coresight_etmv4_attrs,
2142 static const struct attribute_group coresight_etmv4_mgmt_group = {
2143 .attrs = coresight_etmv4_mgmt_attrs,
2144 .name = "mgmt",
2147 static const struct attribute_group coresight_etmv4_trcidr_group = {
2148 .attrs = coresight_etmv4_trcidr_attrs,
2149 .name = "trcidr",
2152 const struct attribute_group *coresight_etmv4_groups[] = {
2153 &coresight_etmv4_group,
2154 &coresight_etmv4_mgmt_group,
2155 &coresight_etmv4_trcidr_group,
2156 NULL,