// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
        unsigned long addr;
        unsigned long ip;
        enum reason_type type;
        int active_traces;
};

struct remap_trace {
        struct list_head list;
        struct kmmio_probe probe;
        resource_size_t phys;
        unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);           /* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */
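
/*
 * Illustration only: a sketch of the pattern the rules above imply,
 * mirroring ioremap_trace_core() and iounmap_trace_core() below.
 *
 *      spin_lock_irq(&trace_lock);
 *      if (!is_enabled())
 *              goto not_enabled;
 *      mmio_trace_mapping(&map);       // and/or update trace_list
 * not_enabled:
 *      spin_unlock_irq(&trace_lock);
 */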

/* module parameters */
static unsigned long    filter_offset;
static bool             nommiotrace;
static bool             trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
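
/*
 * These knobs are normally set on the kernel command line. An illustrative
 * example, assuming this code is built into the "mmiotrace" object (the
 * address below is made up):
 *
 *      mmiotrace.filter_offset=0xfd000000 mmiotrace.trace_pc=1
 */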

static bool is_enabled(void)
{
        return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
        unsigned int level;
        pte_t *pte = lookup_address(address, &level);

        if (!pte) {
                pr_err("Error in %s: no pte for page 0x%08lx\n",
                       __func__, address);
                return;
        }

        if (level == PG_LEVEL_2M) {
                pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
                         address);
                BUG();
        }
        pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
                address,
                (unsigned long long)pte_val(*pte),
                (unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
        const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

        pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
                 addr, my_reason->addr);
        print_pte(addr);
        pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
        pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
        pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
                 regs->ax, regs->bx, regs->cx, regs->dx);
        pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
                 regs->si, regs->di, regs->bp, regs->sp);
#else
        pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
                 regs->ax, regs->cx, regs->dx);
        pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
                 regs->si, regs->di, regs->bp, regs->sp);
#endif
        put_cpu_var(pf_reason);
        BUG();
}

static void pre(struct kmmio_probe *p, struct pt_regs *regs,
                                                unsigned long addr)
{
        struct trap_reason *my_reason = &get_cpu_var(pf_reason);
        struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
        const unsigned long instptr = instruction_pointer(regs);
        const enum reason_type type = get_ins_type(instptr);
        struct remap_trace *trace = p->private;

        /* it doesn't make sense to have more than one active trace per cpu */
        if (my_reason->active_traces)
                die_kmmio_nesting_error(regs, addr);
        else
                my_reason->active_traces++;

        my_reason->type = type;
        my_reason->addr = addr;
        my_reason->ip = instptr;
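
        /*
         * The faulting virtual address is translated to a physical address
         * by taking its offset within the probed mapping and adding the
         * physical base recorded at ioremap time.
         */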
        my_trace->phys = addr - trace->probe.addr + trace->phys;
        my_trace->map_id = trace->id;

        /*
         * Only record the program counter when requested.
         * It may taint clean-room reverse engineering.
         */
        if (trace_pc)
                my_trace->pc = instptr;
        else
                my_trace->pc = 0;

        /*
         * XXX: the timestamp recorded will be *after* the tracing has been
         * done, not at the time we hit the instruction. SMP implications
         * on event ordering?
         */

        switch (type) {
        case REG_READ:
                my_trace->opcode = MMIO_READ;
                my_trace->width = get_ins_mem_width(instptr);
                break;
        case REG_WRITE:
                my_trace->opcode = MMIO_WRITE;
                my_trace->width = get_ins_mem_width(instptr);
                my_trace->value = get_ins_reg_val(instptr, regs);
                break;
        case IMM_WRITE:
                my_trace->opcode = MMIO_WRITE;
                my_trace->width = get_ins_mem_width(instptr);
                my_trace->value = get_ins_imm_val(instptr);
                break;
        default:
                {
                        unsigned char *ip = (unsigned char *)instptr;
                        my_trace->opcode = MMIO_UNKNOWN_OP;
                        my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
                                                                *(ip + 2);
                }
        }
        put_cpu_var(cpu_trace);
        put_cpu_var(pf_reason);
}

static void post(struct kmmio_probe *p, unsigned long condition,
                                                        struct pt_regs *regs)
{
        struct trap_reason *my_reason = &get_cpu_var(pf_reason);
        struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

        /* this should always return the active_trace count to 0 */
        my_reason->active_traces--;
        if (my_reason->active_traces) {
                pr_emerg("unexpected post handler");
                BUG();
        }
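
        /*
         * For a read, the destination register only holds the value after
         * the faulting instruction has been single-stepped, so the value
         * can only be picked up here in the post handler.
         */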
        switch (my_reason->type) {
        case REG_READ:
                my_trace->value = get_ins_reg_val(my_reason->ip, regs);
                break;
        default:
                break;
        }

        mmio_trace_rw(my_trace);
        put_cpu_var(cpu_trace);
        put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
                                                        void __iomem *addr)
{
        static atomic_t next_id;
        struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
        /* These are page-unaligned. */
        struct mmiotrace_map map = {
                .phys = offset,
                .virt = (unsigned long)addr,
                .len = size,
                .opcode = MMIO_PROBE
        };

        if (!trace) {
                pr_err("kmalloc failed in ioremap\n");
                return;
        }

        *trace = (struct remap_trace) {
                .probe = {
                        .addr = (unsigned long)addr,
                        .len = size,
                        .pre_handler = pre,
                        .post_handler = post,
                        .private = trace
                },
                .phys = offset,
                .id = atomic_inc_return(&next_id)
        };
        map.map_id = trace->id;

        spin_lock_irq(&trace_lock);
        if (!is_enabled()) {
                kfree(trace);
                goto not_enabled;
        }

        mmio_trace_mapping(&map);
        list_add_tail(&trace->list, &trace_list);
        if (!nommiotrace)
                register_kmmio_probe(&trace->probe);

not_enabled:
        spin_unlock_irq(&trace_lock);
}

void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
                                                void __iomem *addr)
{
        if (!is_enabled()) /* recheck and proper locking in *_core() */
                return;

        pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
                 (unsigned long long)offset, size, addr);
        if ((filter_offset) && (offset != filter_offset))
                return;
        ioremap_trace_core(offset, size, addr);
}

static void iounmap_trace_core(volatile void __iomem *addr)
{
        struct mmiotrace_map map = {
                .phys = 0,
                .virt = (unsigned long)addr,
                .len = 0,
                .opcode = MMIO_UNPROBE
        };
        struct remap_trace *trace;
        struct remap_trace *tmp;
        struct remap_trace *found_trace = NULL;

        pr_debug("Unmapping %p.\n", addr);

        spin_lock_irq(&trace_lock);
        if (!is_enabled())
                goto not_enabled;
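
        /*
         * Find the mapping being torn down in the list of active traces,
         * unregister its probe and unlink it while still under trace_lock.
         */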
        list_for_each_entry_safe(trace, tmp, &trace_list, list) {
                if ((unsigned long)addr == trace->probe.addr) {
                        if (!nommiotrace)
                                unregister_kmmio_probe(&trace->probe);
                        list_del(&trace->list);
                        found_trace = trace;
                        break;
                }
        }
        map.map_id = (found_trace) ? found_trace->id : -1;
        mmio_trace_mapping(&map);

not_enabled:
        spin_unlock_irq(&trace_lock);
        if (found_trace) {
                synchronize_rcu(); /* unregister_kmmio_probe() requirement */
                kfree(found_trace);
        }
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
        if (is_enabled()) /* recheck and proper locking in *_core() */
                iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
        int ret = 0;
        va_list args;
        unsigned long flags;

        va_start(args, fmt);

        spin_lock_irqsave(&trace_lock, flags);
        if (is_enabled())
                ret = mmio_trace_printk(fmt, args);
        spin_unlock_irqrestore(&trace_lock, flags);

        va_end(args);
        return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);
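
/*
 * Illustrative use from a driver under test (hypothetical call site), so a
 * marker shows up in the trace interleaved with the recorded MMIO events:
 *
 *      mmiotrace_printk("writing init sequence to BAR0\n");
 */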

static void clear_trace_list(void)
{
        struct remap_trace *trace;
        struct remap_trace *tmp;

        /*
         * No locking required, because the caller ensures we are in a
         * critical section via mutex, and is_enabled() is false,
         * i.e. nothing can traverse or modify this list.
         * Caller also ensures is_enabled() cannot change.
         */
        list_for_each_entry(trace, &trace_list, list) {
                pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
                          trace->probe.addr, trace->probe.len);
                if (!nommiotrace)
                        unregister_kmmio_probe(&trace->probe);
        }
        synchronize_rcu(); /* unregister_kmmio_probe() requirement */

        list_for_each_entry_safe(trace, tmp, &trace_list, list) {
                list_del(&trace->list);
                kfree(trace);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;
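
/*
 * Background: kmmio arms a probe by making the page not-present, then makes
 * it present again while single-stepping the faulting instruction. During
 * that window another CPU could touch the page without faulting, so its
 * accesses would go unrecorded. Running with a single online CPU avoids
 * such silently missed events.
 */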

static void enter_uniprocessor(void)
{
        int cpu;
        int err;

        if (!cpumask_available(downed_cpus) &&
            !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
                pr_notice("Failed to allocate mask\n");
                goto out;
        }

        cpumask_copy(downed_cpus, cpu_online_mask);
        cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
        if (num_online_cpus() > 1)
                pr_notice("Disabling non-boot CPUs...\n");

        for_each_cpu(cpu, downed_cpus) {
                err = remove_cpu(cpu);
                if (!err)
                        pr_info("CPU%d is down.\n", cpu);
                else
                        pr_err("Error taking CPU%d down: %d\n", cpu, err);
        }
out:
        if (num_online_cpus() > 1)
                pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
        int cpu;
        int err;

        if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
                return;
        pr_notice("Re-enabling CPUs...\n");
        for_each_cpu(cpu, downed_cpus) {
                err = add_cpu(cpu);
                if (!err)
                        pr_info("enabled CPU%d.\n", cpu);
                else
                        pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
        }
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
        if (num_online_cpus() > 1)
                pr_warn("multiple CPUs are online, may miss events. "
                        "Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

void enable_mmiotrace(void)
{
        mutex_lock(&mmiotrace_mutex);
        if (is_enabled())
                goto out;

        if (nommiotrace)
                pr_info("MMIO tracing disabled.\n");
        enter_uniprocessor();
        spin_lock_irq(&trace_lock);
        atomic_inc(&mmiotrace_enabled);
        spin_unlock_irq(&trace_lock);
        pr_info("enabled.\n");
out:
        mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
        mutex_lock(&mmiotrace_mutex);
        if (!is_enabled())
                goto out;

        spin_lock_irq(&trace_lock);
        atomic_dec(&mmiotrace_enabled);
        BUG_ON(is_enabled());
        spin_unlock_irq(&trace_lock);

        clear_trace_list(); /* guarantees: no more kmmio callbacks */
        leave_uniprocessor();
        pr_info("disabled.\n");
out:
        mutex_unlock(&mmiotrace_mutex);
}
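
/*
 * enable_mmiotrace() and disable_mmiotrace() are not called directly by
 * users; they are driven by the ftrace "mmiotrace" tracer, e.g.:
 *
 *      echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 */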