// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"
struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};
/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */
/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */
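/*
 * Illustrative sketch of these rules (not part of the original file):
 * a routine that emits trace events takes trace_lock and rechecks
 * is_enabled() before calling into the tracer, e.g.
 *
 *	spin_lock_irq(&trace_lock);
 *	if (is_enabled())
 *		mmio_trace_mapping(&map);
 *	spin_unlock_irq(&trace_lock);
 *
 * ioremap_trace_core() and iounmap_trace_core() below follow this
 * pattern.
 */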
/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}
static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}
/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			/* pack the first three opcode bytes for later decoding */
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}
static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		/* the read value only exists after the access completed */
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}
static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}
void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}
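/*
 * Call-site sketch (an assumption about the x86 ioremap path, not part
 * of this file): the architecture reports each new MMIO mapping here
 * once it has been established, roughly
 *
 *	void __iomem *ret = <newly created mapping>;
 *	mmiotrace_ioremap(phys_addr, size, ret);
 *
 * so every traced mapping gets a kmmio probe covering its virtual
 * range.
 */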
static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}
void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}
int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;
	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);
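/*
 * Usage sketch (illustrative only, not part of the original file):
 * a driver under inspection can drop markers into the trace log, e.g.
 *
 *	mmiotrace_printk("about to write to %p\n", ioaddr);
 *
 * where "ioaddr" is a hypothetical __iomem pointer; the call is a
 * no-op returning 0 while tracing is disabled.
 */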
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	cpus_read_lock();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	cpus_read_unlock();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}
#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
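/*
 * Usage sketch (paraphrased from Documentation/trace/mmiotrace.rst,
 * not part of this file): these entry points are driven by the ftrace
 * tracer selection, e.g. from a shell:
 *
 *	mount -t debugfs debugfs /sys/kernel/debug
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
 *	echo nop > /sys/kernel/debug/tracing/current_tracer
 */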