Linux 4.2.1
linux/fpc-iii.git / drivers/hwtracing/coresight/coresight-etm4x.c
blob: 1312e993c5017b6f28d87197f9201479af1eb877

/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/coresight.h>
#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <asm/sections.h>

#include "coresight-etm4x.h"
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETMv4 currently registered */
static int etm4_count;
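/* Per-CPU pointers to the registered ETMv4 instances, indexed by CPU number */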
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];

static void etm4_os_unlock(void *info)
{
	struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;

	/* Writing any value to ETMOSLAR unlocks the trace registers */
	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
	isb();
}
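
/* Only major version 4 of the ETM architecture is handled by this driver */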
static bool etm4_arch_supported(u8 arch)
{
	switch (arch) {
	case ETM_ARCH_V4:
		break;
	default:
		return false;
	}
	return true;
}

static int etm4_trace_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata->enable)
		return drvdata->trcid;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
	trace_id &= ETM_TRACEID_MASK;
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return trace_id;
}

static void etm4_enable_hw(void *info)
{
	int i;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
	writel_relaxed(drvdata->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(drvdata->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(drvdata->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(drvdata->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(drvdata->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(drvdata->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}

	for (i = 0; i < drvdata->nr_resource; i++)
		writel_relaxed(drvdata->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(drvdata->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(drvdata->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(drvdata->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(drvdata->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(drvdata->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(drvdata->ctxid_val[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(drvdata->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}

static int etm4_enable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw, drvdata, 1);
	if (ret)
		goto err;
	drvdata->enable = true;
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;

err:
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(drvdata->dev);
	return ret;
}

static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	control = readl_relaxed(drvdata->base + TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/* make sure everything completes before disabling */
	mb();
	isb();
	writel_relaxed(control, drvdata->base + TRCPRGCTLR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}

static void etm4_disable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();

	pm_runtime_put(drvdata->dev);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}

static const struct coresight_ops_source etm4_source_ops = {
	.trace_id	= etm4_trace_id,
	.enable		= etm4_enable,
	.disable	= etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
	.source_ops	= &etm4_source_ops,
};

static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx = drvdata->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			drvdata->viiectlr |= BIT(idx / 2 + 16);
			drvdata->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			drvdata->viiectlr |= BIT(idx / 2);
			drvdata->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}

static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		drvdata->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	drvdata->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	drvdata->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* Disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* Disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	drvdata->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp == true) {
		drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		drvdata->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* Disable external input events */
	drvdata->ext_inp = 0x0;

	drvdata->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	drvdata->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	drvdata->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		drvdata->addr_val[i] = 0x0;
		drvdata->addr_acc[i] = 0x0;
		drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	drvdata->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++)
		drvdata->ctxid_val[i] = 0x0;
	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	drvdata->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;
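
	/* Default the trace ID to the CPU number plus one, so that ID 0 stays reserved */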
	drvdata->trcid = drvdata->cpu + 1;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->mode = val & ETMv4_MODE_ALL;

	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		drvdata->cfg &= ~(BIT(1) | BIT(2));
		if (drvdata->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			drvdata->cfg |= BIT(1);
		if (drvdata->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			drvdata->cfg |= BIT(2);
		if (drvdata->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			drvdata->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		drvdata->cfg |= BIT(3);
	else
		drvdata->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		drvdata->cfg |= BIT(4);
	else
		drvdata->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		drvdata->cfg |= BIT(6);
	else
		drvdata->cfg &= ~BIT(6);

	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		drvdata->cfg |= BIT(7);
	else
		drvdata->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(drvdata->mode);
	if (drvdata->trccond == true) {
		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		drvdata->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		drvdata->cfg |= BIT(11);
	else
		drvdata->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		drvdata->cfg |= BIT(12);
	else
		drvdata->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(drvdata->mode);
	/* start by clearing QE bits */
	drvdata->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		drvdata->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		drvdata->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		drvdata->eventctrl1 |= BIT(11);
	else
		drvdata->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		drvdata->eventctrl1 |= BIT(12);
	else
		drvdata->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (drvdata->mode & ETM_MODE_ISTALL_EN)
		drvdata->stall_ctrl |= BIT(8);
	else
		drvdata->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (drvdata->mode & ETM_MODE_INSTPRIO)
		drvdata->stall_ctrl |= BIT(10);
	else
		drvdata->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		drvdata->stall_ctrl |= BIT(13);
	else
		drvdata->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
		drvdata->vinst_ctrl |= BIT(9);
	else
		drvdata->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (drvdata->mode & ETM_MODE_TRACE_RESET)
		drvdata->vinst_ctrl |= BIT(10);
	else
		drvdata->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		drvdata->vinst_ctrl |= BIT(11);
	else
		drvdata->vinst_ctrl &= ~BIT(11);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(mode);

static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	drvdata->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		drvdata->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		drvdata->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		drvdata->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		drvdata->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);

static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = BMVAL(drvdata->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		drvdata->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		drvdata->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);

static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);

static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	drvdata->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);

static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
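	/* The threshold cannot be lower than the implemented minimum (TRCIDR3.CCITMIN) */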
	if (val < drvdata->ccitmin)
		return -EINVAL;

	drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);

static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;
	/*
	 * Bit[7:0] selects which address range comparator is used for
	 * branch broadcast control.
	 */
	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
		return -EINVAL;

	drvdata->bb_ctrl = val;
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);

static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	drvdata->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);

static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = BMVAL(drvdata->vinst_ctrl, 16, 19);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
	drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	drvdata->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* EXLEVEL_NS, bits[23:20] */
	val = BMVAL(drvdata->vinst_ctrl, 20, 23);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	drvdata->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	val = BMVAL(drvdata->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
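	/* Only instruction address comparison is handled here; other types are left unchanged */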
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	idx = drvdata->addr_idx;
	spin_lock(&drvdata->spinlock);
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)drvdata->addr_val[idx];
	val2 = (unsigned long)drvdata->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val1;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	drvdata->addr_val[idx + 1] = (u64)val2;
	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;

	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
	drvdata->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;

	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	drvdata->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(drvdata->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			drvdata->addr_acc[idx] |= BIT(2);
			drvdata->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			drvdata->addr_acc[idx] &= ~BIT(2);
			drvdata->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			drvdata->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			drvdata->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);

static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(drvdata->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* clear context ID comparator bits[6:4] */
	drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	drvdata->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);

static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	drvdata->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->seq_idx;
	val = drvdata->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->seq_idx;
	/* RST, bits[7:0] */
	drvdata->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
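	/* A sequencer reset event only applies if at least one sequencer state is implemented */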
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	drvdata->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	val = drvdata->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	drvdata->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	val = drvdata->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	drvdata->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	val = drvdata->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	drvdata->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);

static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	val = drvdata->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	drvdata->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_val_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	val = (unsigned long)drvdata->ctxid_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_val_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	drvdata->ctxid_val[idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_val);

static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val1 = drvdata->ctxid_mask0;
	val2 = drvdata->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		drvdata->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		drvdata->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		drvdata->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * of ctxid comparator0 value (corresponding to byte 0) register.
	 */
	mask = drvdata->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				drvdata->ctxid_val[i] &= ~(0xFF << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = drvdata->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
2036 static ssize_t vmid_masks_show(struct device *dev,
2037 struct device_attribute *attr, char *buf)
2039 unsigned long val1, val2;
2040 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2042 spin_lock(&drvdata->spinlock);
2043 val1 = drvdata->vmid_mask0;
2044 val2 = drvdata->vmid_mask1;
2045 spin_unlock(&drvdata->spinlock);
2046 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2047 }
2049 static ssize_t vmid_masks_store(struct device *dev,
2050 struct device_attribute *attr,
2051 const char *buf, size_t size)
2052 {
2053 u8 i, j, maskbyte;
2054 unsigned long val1, val2, mask;
2055 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2056 /*
2057 * only implemented when vmid tracing is enabled, i.e. at least one
2058 * vmid comparator is implemented and at least 8 bit vmid size
2059 */
2060 if (!drvdata->vmid_size || !drvdata->numvmidc)
2061 return -EINVAL;
2062 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2063 return -EINVAL;
2065 spin_lock(&drvdata->spinlock);
2067 /*
2068 * each byte[0..3] controls mask value applied to vmid
2069 * comparator[0..3]
2070 */
2071 switch (drvdata->numvmidc) {
2072 case 0x1:
2073 /* COMP0, bits[7:0] */
2074 drvdata->vmid_mask0 = val1 & 0xFF;
2075 break;
2076 case 0x2:
2077 /* COMP1, bits[15:8] */
2078 drvdata->vmid_mask0 = val1 & 0xFFFF;
2079 break;
2080 case 0x3:
2081 /* COMP2, bits[23:16] */
2082 drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2083 break;
2084 case 0x4:
2085 /* COMP3, bits[31:24] */
2086 drvdata->vmid_mask0 = val1;
2087 break;
2088 case 0x5:
2089 /* COMP4, bits[7:0] */
2090 drvdata->vmid_mask0 = val1;
2091 drvdata->vmid_mask1 = val2 & 0xFF;
2092 break;
2093 case 0x6:
2094 /* COMP5, bits[15:8] */
2095 drvdata->vmid_mask0 = val1;
2096 drvdata->vmid_mask1 = val2 & 0xFFFF;
2097 break;
2098 case 0x7:
2099 /* COMP6, bits[23:16] */
2100 drvdata->vmid_mask0 = val1;
2101 drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2102 break;
2103 case 0x8:
2104 /* COMP7, bits[31:24] */
2105 drvdata->vmid_mask0 = val1;
2106 drvdata->vmid_mask1 = val2;
2107 break;
2108 default:
2109 break;
2110 }
2112 /*
2113 * If software sets a mask bit to 1, it must program the relevant byte of
2114 * the vmid comparator value to 0x0, otherwise behavior is unpredictable.
2115 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24] of the
2116 * value register of vmid comparator 0 (the comparator byte 0 of the mask controls).
2117 */
2118 mask = drvdata->vmid_mask0;
2119 for (i = 0; i < drvdata->numvmidc; i++) {
2120 /* mask value of corresponding vmid comparator */
2121 maskbyte = mask & ETMv4_EVENT_MASK;
2122 /*
2123 * each bit corresponds to a byte of respective vmid comparator
2124 * value register
2125 */
2126 for (j = 0; j < 8; j++) {
2127 if (maskbyte & 1)
2128 drvdata->vmid_val[i] &= ~((u64)0xFF << (j * 8));
2129 maskbyte >>= 1;
2130 }
2131 /* Select the next vmid comparator mask value */
2132 if (i == 3)
2133 /* vmid comparators[4-7] */
2134 mask = drvdata->vmid_mask1;
2135 else
2136 mask >>= 0x8;
2137 }
2138 spin_unlock(&drvdata->spinlock);
2139 return size;
2140 }
2141 static DEVICE_ATTR_RW(vmid_masks);
2143 static ssize_t cpu_show(struct device *dev,
2144 struct device_attribute *attr, char *buf)
2145 {
2146 int val;
2147 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2149 val = drvdata->cpu;
2150 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2151 }
2153 static DEVICE_ATTR_RO(cpu);
2155 static struct attribute *coresight_etmv4_attrs[] = {
2156 &dev_attr_nr_pe_cmp.attr,
2157 &dev_attr_nr_addr_cmp.attr,
2158 &dev_attr_nr_cntr.attr,
2159 &dev_attr_nr_ext_inp.attr,
2160 &dev_attr_numcidc.attr,
2161 &dev_attr_numvmidc.attr,
2162 &dev_attr_nrseqstate.attr,
2163 &dev_attr_nr_resource.attr,
2164 &dev_attr_nr_ss_cmp.attr,
2165 &dev_attr_reset.attr,
2166 &dev_attr_mode.attr,
2167 &dev_attr_pe.attr,
2168 &dev_attr_event.attr,
2169 &dev_attr_event_instren.attr,
2170 &dev_attr_event_ts.attr,
2171 &dev_attr_syncfreq.attr,
2172 &dev_attr_cyc_threshold.attr,
2173 &dev_attr_bb_ctrl.attr,
2174 &dev_attr_event_vinst.attr,
2175 &dev_attr_s_exlevel_vinst.attr,
2176 &dev_attr_ns_exlevel_vinst.attr,
2177 &dev_attr_addr_idx.attr,
2178 &dev_attr_addr_instdatatype.attr,
2179 &dev_attr_addr_single.attr,
2180 &dev_attr_addr_range.attr,
2181 &dev_attr_addr_start.attr,
2182 &dev_attr_addr_stop.attr,
2183 &dev_attr_addr_ctxtype.attr,
2184 &dev_attr_addr_context.attr,
2185 &dev_attr_seq_idx.attr,
2186 &dev_attr_seq_state.attr,
2187 &dev_attr_seq_event.attr,
2188 &dev_attr_seq_reset_event.attr,
2189 &dev_attr_cntr_idx.attr,
2190 &dev_attr_cntrldvr.attr,
2191 &dev_attr_cntr_val.attr,
2192 &dev_attr_cntr_ctrl.attr,
2193 &dev_attr_res_idx.attr,
2194 &dev_attr_res_ctrl.attr,
2195 &dev_attr_ctxid_idx.attr,
2196 &dev_attr_ctxid_val.attr,
2197 &dev_attr_ctxid_masks.attr,
2198 &dev_attr_vmid_idx.attr,
2199 &dev_attr_vmid_val.attr,
2200 &dev_attr_vmid_masks.attr,
2201 &dev_attr_cpu.attr,
2202 NULL,
2203 };
2205 #define coresight_simple_func(name, offset) \
2206 static ssize_t name##_show(struct device *_dev, \
2207 struct device_attribute *attr, char *buf) \
2208 { \
2209 struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
2210 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
2211 readl_relaxed(drvdata->base + offset)); \
2212 } \
2213 static DEVICE_ATTR_RO(name)
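/*
 * For example, coresight_simple_func(trcoslsr, TRCOSLSR) below expands to a
 * trcoslsr_show() handler that prints the raw TRCOSLSR register contents,
 * plus the matching read-only dev_attr_trcoslsr used in the sysfs groups
 * further down.
 */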
2215 coresight_simple_func(trcoslsr, TRCOSLSR);
2216 coresight_simple_func(trcpdcr, TRCPDCR);
2217 coresight_simple_func(trcpdsr, TRCPDSR);
2218 coresight_simple_func(trclsr, TRCLSR);
2219 coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
2220 coresight_simple_func(trcdevid, TRCDEVID);
2221 coresight_simple_func(trcdevtype, TRCDEVTYPE);
2222 coresight_simple_func(trcpidr0, TRCPIDR0);
2223 coresight_simple_func(trcpidr1, TRCPIDR1);
2224 coresight_simple_func(trcpidr2, TRCPIDR2);
2225 coresight_simple_func(trcpidr3, TRCPIDR3);
2227 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2228 &dev_attr_trcoslsr.attr,
2229 &dev_attr_trcpdcr.attr,
2230 &dev_attr_trcpdsr.attr,
2231 &dev_attr_trclsr.attr,
2232 &dev_attr_trcauthstatus.attr,
2233 &dev_attr_trcdevid.attr,
2234 &dev_attr_trcdevtype.attr,
2235 &dev_attr_trcpidr0.attr,
2236 &dev_attr_trcpidr1.attr,
2237 &dev_attr_trcpidr2.attr,
2238 &dev_attr_trcpidr3.attr,
2239 NULL,
2240 };
2242 coresight_simple_func(trcidr0, TRCIDR0);
2243 coresight_simple_func(trcidr1, TRCIDR1);
2244 coresight_simple_func(trcidr2, TRCIDR2);
2245 coresight_simple_func(trcidr3, TRCIDR3);
2246 coresight_simple_func(trcidr4, TRCIDR4);
2247 coresight_simple_func(trcidr5, TRCIDR5);
2248 /* trcidr[6,7] are reserved */
2249 coresight_simple_func(trcidr8, TRCIDR8);
2250 coresight_simple_func(trcidr9, TRCIDR9);
2251 coresight_simple_func(trcidr10, TRCIDR10);
2252 coresight_simple_func(trcidr11, TRCIDR11);
2253 coresight_simple_func(trcidr12, TRCIDR12);
2254 coresight_simple_func(trcidr13, TRCIDR13);
2256 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2257 &dev_attr_trcidr0.attr,
2258 &dev_attr_trcidr1.attr,
2259 &dev_attr_trcidr2.attr,
2260 &dev_attr_trcidr3.attr,
2261 &dev_attr_trcidr4.attr,
2262 &dev_attr_trcidr5.attr,
2263 /* trcidr[6,7] are reserved */
2264 &dev_attr_trcidr8.attr,
2265 &dev_attr_trcidr9.attr,
2266 &dev_attr_trcidr10.attr,
2267 &dev_attr_trcidr11.attr,
2268 &dev_attr_trcidr12.attr,
2269 &dev_attr_trcidr13.attr,
2270 NULL,
2271 };
2273 static const struct attribute_group coresight_etmv4_group = {
2274 .attrs = coresight_etmv4_attrs,
2275 };
2277 static const struct attribute_group coresight_etmv4_mgmt_group = {
2278 .attrs = coresight_etmv4_mgmt_attrs,
2279 .name = "mgmt",
2280 };
2282 static const struct attribute_group coresight_etmv4_trcidr_group = {
2283 .attrs = coresight_etmv4_trcidr_attrs,
2284 .name = "trcidr",
2285 };
2287 static const struct attribute_group *coresight_etmv4_groups[] = {
2288 &coresight_etmv4_group,
2289 &coresight_etmv4_mgmt_group,
2290 &coresight_etmv4_trcidr_group,
2291 NULL,
2292 };
2294 static void etm4_init_arch_data(void *info)
2295 {
2296 u32 etmidr0;
2297 u32 etmidr1;
2298 u32 etmidr2;
2299 u32 etmidr3;
2300 u32 etmidr4;
2301 u32 etmidr5;
2302 struct etmv4_drvdata *drvdata = info;
2304 CS_UNLOCK(drvdata->base);
2306 /* find all capabilities of the tracing unit */
2307 etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
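/*
 * The BMVAL(reg, lsb, msb) helper used below comes from the CoreSight driver
 * headers; it extracts the bit field reg[msb:lsb] and shifts it down to bit 0,
 * which is how each TRCIDRn field is decoded here.
 */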
2309 /* INSTP0, bits[2:1] P0 tracing support field */
2310 if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
2311 drvdata->instrp0 = true;
2312 else
2313 drvdata->instrp0 = false;
2315 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2316 if (BMVAL(etmidr0, 5, 5))
2317 drvdata->trcbb = true;
2318 else
2319 drvdata->trcbb = false;
2321 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2322 if (BMVAL(etmidr0, 6, 6))
2323 drvdata->trccond = true;
2324 else
2325 drvdata->trccond = false;
2327 /* TRCCCI, bit[7] Cycle counting instruction bit */
2328 if (BMVAL(etmidr0, 7, 7))
2329 drvdata->trccci = true;
2330 else
2331 drvdata->trccci = false;
2333 /* RETSTACK, bit[9] Return stack bit */
2334 if (BMVAL(etmidr0, 9, 9))
2335 drvdata->retstack = true;
2336 else
2337 drvdata->retstack = false;
2339 /* NUMEVENT, bits[11:10] Number of events field */
2340 drvdata->nr_event = BMVAL(etmidr0, 10, 11);
2341 /* QSUPP, bits[16:15] Q element support field */
2342 drvdata->q_support = BMVAL(etmidr0, 15, 16);
2343 /* TSSIZE, bits[28:24] Global timestamp size field */
2344 drvdata->ts_size = BMVAL(etmidr0, 24, 28);
2346 /* base architecture of trace unit */
2347 etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
2348 /*
2349 * TRCARCHMIN, bits[7:4] architecture minor version number
2350 * TRCARCHMAJ, bits[11:8] architecture major version number
2351 */
2352 drvdata->arch = BMVAL(etmidr1, 4, 11);
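/*
 * For an ETMv4.0 implementation TRCARCHMAJ is 0x4 and TRCARCHMIN is 0x0, so
 * drvdata->arch reads back as 0x40, the ETM_ARCH_V4 value that
 * etm4_arch_supported() accepts.
 */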
2354 /* maximum size of resources */
2355 etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
2356 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2357 drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
2358 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2359 drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
2360 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2361 drvdata->ccsize = BMVAL(etmidr2, 25, 28);
2363 etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
2364 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2365 drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
2366 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2367 drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
2368 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2369 drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
2371 /*
2372 * TRCERR, bit[24] whether a trace unit can trace a
2373 * system error exception.
2374 */
2375 if (BMVAL(etmidr3, 24, 24))
2376 drvdata->trc_error = true;
2377 else
2378 drvdata->trc_error = false;
2380 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2381 if (BMVAL(etmidr3, 25, 25))
2382 drvdata->syncpr = true;
2383 else
2384 drvdata->syncpr = false;
2386 /* STALLCTL, bit[26] is stall control implemented? */
2387 if (BMVAL(etmidr3, 26, 26))
2388 drvdata->stallctl = true;
2389 else
2390 drvdata->stallctl = false;
2392 /* SYSSTALL, bit[27] implementation can support stall control? */
2393 if (BMVAL(etmidr3, 27, 27))
2394 drvdata->sysstall = true;
2395 else
2396 drvdata->sysstall = false;
2398 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2399 drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
2401 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2402 if (BMVAL(etmidr3, 31, 31))
2403 drvdata->nooverflow = true;
2404 else
2405 drvdata->nooverflow = false;
2407 /* number of resources trace unit supports */
2408 etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
2409 /* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
2410 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2411 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2412 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2413 /* NUMRSPAIR, bits[19:16] the number of resource pairs for tracing */
2414 drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
2415 /*
2416 * NUMSSCC, bits[23:20] the number of single-shot
2417 * comparator control for tracing
2418 */
2419 drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
2420 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2421 drvdata->numcidc = BMVAL(etmidr4, 24, 27);
2422 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2423 drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
2425 etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
2426 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2427 drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
2428 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2429 drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
2430 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2431 if (BMVAL(etmidr5, 22, 22))
2432 drvdata->atbtrig = true;
2433 else
2434 drvdata->atbtrig = false;
2435 /*
2436 * LPOVERRIDE, bit[23] implementation supports
2437 * low-power state override
2438 */
2439 if (BMVAL(etmidr5, 23, 23))
2440 drvdata->lpoverride = true;
2441 else
2442 drvdata->lpoverride = false;
2443 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2444 drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
2445 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2446 drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
2447 CS_LOCK(drvdata->base);
2448 }
2450 static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
2451 {
2452 int i;
2454 drvdata->pe_sel = 0x0;
2455 drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
2456 ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
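/*
 * i.e. context ID tracing, VMID tracing, global timestamping and the return
 * stack are all requested by default; drvdata->cfg is programmed into
 * TRCCONFIGR when the trace unit is enabled.
 */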
2458 /* disable all events tracing */
2459 drvdata->eventctrl0 = 0x0;
2460 drvdata->eventctrl1 = 0x0;
2462 /* disable stalling */
2463 drvdata->stall_ctrl = 0x0;
2465 /* disable timestamp event */
2466 drvdata->ts_ctrl = 0x0;
2468 /* enable trace synchronization every 4096 bytes for trace */
2469 if (drvdata->syncpr == false)
2470 drvdata->syncfreq = 0xC;
2473 * enable viewInst to trace everything with start-stop logic in
2474 * started state
2476 drvdata->vinst_ctrl |= BIT(0);
2477 /* set initial state of start-stop logic */
2478 if (drvdata->nr_addr_cmp)
2479 drvdata->vinst_ctrl |= BIT(9);
2481 /* no address range filtering for ViewInst */
2482 drvdata->viiectlr = 0x0;
2483 /* no start-stop filtering for ViewInst */
2484 drvdata->vissctlr = 0x0;
2486 /* disable seq events */
2487 for (i = 0; i < drvdata->nrseqstate-1; i++)
2488 drvdata->seq_ctrl[i] = 0x0;
2489 drvdata->seq_rst = 0x0;
2490 drvdata->seq_state = 0x0;
2492 /* disable external input events */
2493 drvdata->ext_inp = 0x0;
2495 for (i = 0; i < drvdata->nr_cntr; i++) {
2496 drvdata->cntrldvr[i] = 0x0;
2497 drvdata->cntr_ctrl[i] = 0x0;
2498 drvdata->cntr_val[i] = 0x0;
2499 }
2501 for (i = 2; i < drvdata->nr_resource * 2; i++)
2502 drvdata->res_ctrl[i] = 0x0;
2504 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
2505 drvdata->ss_ctrl[i] = 0x0;
2506 drvdata->ss_pe_cmp[i] = 0x0;
2507 }
2509 if (drvdata->nr_addr_cmp >= 1) {
2510 drvdata->addr_val[0] = (unsigned long)_stext;
2511 drvdata->addr_val[1] = (unsigned long)_etext;
2512 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
2513 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
2514 }
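/*
 * The block above pre-programs the first address comparator pair to span the
 * kernel text section (_stext to _etext). Since viiectlr is left at 0x0 above,
 * this range only affects ViewInst filtering once it is actually selected,
 * e.g. through the addr_* sysfs attributes.
 */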
2516 for (i = 0; i < drvdata->numcidc; i++)
2517 drvdata->ctxid_val[i] = 0x0;
2518 drvdata->ctxid_mask0 = 0x0;
2519 drvdata->ctxid_mask1 = 0x0;
2521 for (i = 0; i < drvdata->numvmidc; i++)
2522 drvdata->vmid_val[i] = 0x0;
2523 drvdata->vmid_mask0 = 0x0;
2524 drvdata->vmid_mask1 = 0x0;
2526 /*
2527 * A trace ID value of 0 is invalid, so let's start at some
2528 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
2529 * start at 0x20.
2530 */
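/*
 * With this scheme the ETM on CPU0 gets trace ID 0x20, CPU1 gets 0x21 and so
 * on, keeping per-CPU trace streams distinguishable at the sink while staying
 * within the 7-bit trace ID space for typical core counts.
 */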
2531 drvdata->trcid = 0x20 + drvdata->cpu;
2532 }
2534 static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2535 void *hcpu)
2536 {
2537 unsigned int cpu = (unsigned long)hcpu;
2539 if (!etmdrvdata[cpu])
2540 goto out;
2542 switch (action & (~CPU_TASKS_FROZEN)) {
2543 case CPU_STARTING:
2544 spin_lock(&etmdrvdata[cpu]->spinlock);
2545 if (!etmdrvdata[cpu]->os_unlock) {
2546 etm4_os_unlock(etmdrvdata[cpu]);
2547 etmdrvdata[cpu]->os_unlock = true;
2548 }
2550 if (etmdrvdata[cpu]->enable)
2551 etm4_enable_hw(etmdrvdata[cpu]);
2552 spin_unlock(&etmdrvdata[cpu]->spinlock);
2553 break;
2555 case CPU_ONLINE:
2556 if (etmdrvdata[cpu]->boot_enable &&
2557 !etmdrvdata[cpu]->sticky_enable)
2558 coresight_enable(etmdrvdata[cpu]->csdev);
2559 break;
2561 case CPU_DYING:
2562 spin_lock(&etmdrvdata[cpu]->spinlock);
2563 if (etmdrvdata[cpu]->enable)
2564 etm4_disable_hw(etmdrvdata[cpu]);
2565 spin_unlock(&etmdrvdata[cpu]->spinlock);
2566 break;
2567 }
2568 out:
2569 return NOTIFY_OK;
2570 }
2572 static struct notifier_block etm4_cpu_notifier = {
2573 .notifier_call = etm4_cpu_callback,
2574 };
2576 static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2577 {
2578 int ret;
2579 void __iomem *base;
2580 struct device *dev = &adev->dev;
2581 struct coresight_platform_data *pdata = NULL;
2582 struct etmv4_drvdata *drvdata;
2583 struct resource *res = &adev->res;
2584 struct coresight_desc *desc;
2585 struct device_node *np = adev->dev.of_node;
2587 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
2588 if (!desc)
2589 return -ENOMEM;
2591 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
2592 if (!drvdata)
2593 return -ENOMEM;
2595 if (np) {
2596 pdata = of_get_coresight_platform_data(dev, np);
2597 if (IS_ERR(pdata))
2598 return PTR_ERR(pdata);
2599 adev->dev.platform_data = pdata;
2600 }
2602 drvdata->dev = &adev->dev;
2603 dev_set_drvdata(dev, drvdata);
2605 /* Validity for the resource is already checked by the AMBA core */
2606 base = devm_ioremap_resource(dev, res);
2607 if (IS_ERR(base))
2608 return PTR_ERR(base);
2610 drvdata->base = base;
2612 spin_lock_init(&drvdata->spinlock);
2614 drvdata->cpu = pdata ? pdata->cpu : 0;
2616 get_online_cpus();
2617 etmdrvdata[drvdata->cpu] = drvdata;
2619 if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
2620 drvdata->os_unlock = true;
2622 if (smp_call_function_single(drvdata->cpu,
2623 etm4_init_arch_data, drvdata, 1))
2624 dev_err(dev, "ETM arch init failed\n");
2626 if (!etm4_count++)
2627 register_hotcpu_notifier(&etm4_cpu_notifier);
2629 put_online_cpus();
2631 if (etm4_arch_supported(drvdata->arch) == false) {
2632 ret = -EINVAL;
2633 goto err_arch_supported;
2634 }
2635 etm4_init_default_data(drvdata);
2637 pm_runtime_put(&adev->dev);
2639 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
2640 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
2641 desc->ops = &etm4_cs_ops;
2642 desc->pdata = pdata;
2643 desc->dev = dev;
2644 desc->groups = coresight_etmv4_groups;
2645 drvdata->csdev = coresight_register(desc);
2646 if (IS_ERR(drvdata->csdev)) {
2647 ret = PTR_ERR(drvdata->csdev);
2648 goto err_coresight_register;
2649 }
2651 dev_info(dev, "%s initialized\n", (char *)id->data);
2653 if (boot_enable) {
2654 coresight_enable(drvdata->csdev);
2655 drvdata->boot_enable = true;
2656 }
2658 return 0;
2660 err_arch_supported:
2661 pm_runtime_put(&adev->dev);
2662 err_coresight_register:
2663 if (--etm4_count == 0)
2664 unregister_hotcpu_notifier(&etm4_cpu_notifier);
2665 return ret;
2666 }
2668 static int etm4_remove(struct amba_device *adev)
2669 {
2670 struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
2672 coresight_unregister(drvdata->csdev);
2673 if (--etm4_count == 0)
2674 unregister_hotcpu_notifier(&etm4_cpu_notifier);
2676 return 0;
2677 }
2679 static struct amba_id etm4_ids[] = {
2680 { /* ETM 4.0 - Qualcomm */
2681 .id = 0x0003b95d,
2682 .mask = 0x0003ffff,
2683 .data = "ETM 4.0",
2684 },
2685 { /* ETM 4.0 - Juno board */
2686 .id = 0x000bb95e,
2687 .mask = 0x000fffff,
2688 .data = "ETM 4.0",
2689 },
2690 { 0, 0},
2691 };
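/*
 * The AMBA bus code matches this table against the peripheral ID read from
 * the component's identification registers: the driver binds to a device when
 * (periphid & mask) == id, so both entries above accept the respective
 * ETM 4.0 implementations.
 */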
2693 static struct amba_driver etm4x_driver = {
2694 .drv = {
2695 .name = "coresight-etm4x",
2696 },
2697 .probe = etm4_probe,
2698 .remove = etm4_remove,
2699 .id_table = etm4_ids,
2700 };
2702 module_amba_driver(etm4x_driver);