/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#define DEBUG 1

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
#include <asm/e820.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

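/*
 * A minimal sketch of the pattern these rules imply for any code path
 * that emits trace events (ioremap_trace_core() below is a real
 * instance; the body here is only a placeholder):
 *
 *	spin_lock_irq(&trace_lock);
 *	if (is_enabled())
 *		... emit events via mmio_trace_rw()/mmio_trace_mapping() ...
 *	spin_unlock_irq(&trace_lock);
 */
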
/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
	print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

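/*
 * pre() runs when an armed page faults: decode the faulting instruction
 * to learn whether it reads or writes, how wide the access is, and (for
 * writes) which value is being stored.
 */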
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

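/*
 * post() runs after kmmio has single-stepped the faulting instruction;
 * only then does the destination register of a read hold the loaded
 * value, which is why REG_READ values are sampled here and not in pre().
 */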
static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}

static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

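/*
 * Note: iounmap_trace_core() may block in synchronize_rcu(), so this
 * hook must only be called from sleepable context; might_sleep()
 * documents that requirement.
 */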
void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;
	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

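/*
 * A hypothetical mmiotrace_printk() call site in a driver under test;
 * the marker text is interleaved with the MMIO events in the trace log:
 *
 *	mmiotrace_printk("BAR0 init sequence begins\n");
 */
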
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

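/*
 * kmmio disarms the traced page while the faulting instruction is
 * single-stepped; another CPU touching the same page during that window
 * would go untraced. Taking all non-boot CPUs offline avoids the race.
 */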
static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (downed_cpus == NULL &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	get_online_cpus();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	put_online_cpus();

	for_each_cpu(cpu, downed_cpus) {
		err = cpu_down(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warning("multiple CPUs still online, may miss events.\n");
}

/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
static void __ref leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = cpu_up(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warning("multiple CPUs are online, may miss events. "
			   "Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

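/*
 * These are driven by the ftrace mmiotrace tracer, typically via
 * (path assuming debugfs is mounted at /sys/kernel/debug):
 *	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 */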
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}