drivers/hwtracing/coresight/coresight-tmc-etr.c

/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
        u32 axictl;

        /* Zero out the memory to help with debug */
        memset(drvdata->vaddr, 0, drvdata->size);

        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
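
        /*
         * Configure the AXI master: allow write bursts of up to 16 beats,
         * clear scatter-gather mode so the ETR treats the buffer as one
         * contiguous region, and mark its accesses as non-secure
         * (PROT_CTL bit 1 set, bit 0 cleared).
         */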
        axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
        axictl |= TMC_AXICTL_WR_BURST_16;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
        axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
        axictl = (axictl &
                  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
                  TMC_AXICTL_PROT_CTL_B1;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
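
        /*
         * Tell the ETR where the trace buffer lives.  Only the low 32 bits
         * of the DMA address are programmed; the high word is written as
         * zero, so the buffer is assumed to sit below 4GB.
         */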
        writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
        writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}
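
/*
 * Work out where valid trace data begins and how much of it is available,
 * based on the TMC write pointer (RWP) and the "memory full" status bit.
 */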
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
        u32 rwp, val;

        rwp = readl_relaxed(drvdata->base + TMC_RWP);
        val = readl_relaxed(drvdata->base + TMC_STS);

        /*
         * Adjust the buffer to point to the beginning of the trace data
         * and update the available trace data.  If the TMC has wrapped
         * (TMC_STS_FULL), the write pointer marks the oldest byte and the
         * whole buffer is valid; otherwise data starts at the base of the
         * buffer and ends at the write pointer.
         */
        if (val & TMC_STS_FULL) {
                drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
                drvdata->len = drvdata->size;
        } else {
                drvdata->buf = drvdata->vaddr;
                drvdata->len = rwp - drvdata->paddr;
        }
}
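
/*
 * Flush and stop the ETR.  In sysFS mode the buffer geometry is captured
 * with tmc_etr_dump_hw() before the TMC is turned off so the trace data
 * can still be read afterwards.
 */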
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
                tmc_etr_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
{
        int ret = 0;
        bool used = false;
        long val;
        unsigned long flags;
        void __iomem *vaddr = NULL;
        dma_addr_t paddr;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /* This shouldn't be happening */
        if (WARN_ON(mode != CS_MODE_SYSFS))
                return -EINVAL;

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->vaddr) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /*
                 * Contiguous memory can't be allocated while a spinlock is
                 * held.  As such allocate memory here and free it if a buffer
                 * has already been allocated (from a previous session).
                 */
                vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
                                           &paddr, GFP_KERNEL);
                if (!vaddr)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        val = local_xchg(&drvdata->mode, mode);
        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (val == CS_MODE_SYSFS)
                goto out;

        /*
         * If drvdata::buf == NULL, use the memory allocated above.
         * Otherwise a buffer still exists from a previous session, so
         * simply use that.
         */
        if (drvdata->buf == NULL) {
                used = true;
                drvdata->vaddr = vaddr;
                drvdata->paddr = paddr;
                drvdata->buf = drvdata->vaddr;
        }

        memset(drvdata->vaddr, 0, drvdata->size);

        tmc_etr_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used && vaddr)
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        if (!ret)
                dev_info(drvdata->dev, "TMC-ETR enabled\n");

        return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
{
        int ret = 0;
        long val;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /* This shouldn't be happening */
        if (WARN_ON(mode != CS_MODE_PERF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EINVAL;
                goto out;
        }

        val = local_xchg(&drvdata->mode, mode);
        /*
         * In Perf mode there can be only one writer per sink.  There
         * is also no need to continue if the ETR is already operated
         * from sysFS.
         */
        if (val != CS_MODE_DISABLED) {
                ret = -EINVAL;
                goto out;
        }

        tmc_etr_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
        switch (mode) {
        case CS_MODE_SYSFS:
                return tmc_enable_etr_sink_sysfs(csdev, mode);
        case CS_MODE_PERF:
                return tmc_enable_etr_sink_perf(csdev, mode);
        }

        /* We shouldn't be here */
        return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
        long val;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
        /* Disable the TMC only if it needs to be */
        if (val != CS_MODE_DISABLED)
                tmc_etr_disable_hw(drvdata);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
        .enable         = tmc_enable_etr_sink,
        .disable        = tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
        .sink_ops       = &tmc_etr_sink_ops,
};
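
/*
 * Get the ETR ready for its buffer to be read: stop an active sysFS trace
 * session so the buffer contents are stable, and mark the device as busy
 * while the read is in progress.
 */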
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
        int ret = 0;
        long val;
        unsigned long flags;

        /* Config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        val = local_read(&drvdata->mode);
        /* Don't interfere if operated from Perf */
        if (val == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (val == CS_MODE_SYSFS)
                tmc_etr_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}
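
/*
 * Called once the buffer has been read: if a sysFS trace session is still
 * active, re-enable the ETR with the same buffer; otherwise free the DMA
 * memory that backed the session.
 */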
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
        unsigned long flags;
        dma_addr_t paddr;
        void __iomem *vaddr = NULL;

        /* Config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Re-enable the TMC if need be */
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
                 * so we don't have to explicitly clear it. Also, since the
                 * tracer is still enabled drvdata::buf can't be NULL.
                 */
                tmc_etr_enable_hw(drvdata);
        } else {
                /*
                 * The ETR is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                vaddr = drvdata->vaddr;
                paddr = drvdata->paddr;
                drvdata->buf = drvdata->vaddr = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free allocated memory outside of the spinlock */
        if (vaddr)
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        return 0;
}