/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licencing details see kernel-base/COPYING
 */
9 #include <linux/interrupt.h>
10 #include <linux/compiler.h>
11 #include <linux/seq_file.h>
12 #include <linux/debugfs.h>
13 #include <linux/kprobes.h>
14 #include <linux/uaccess.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/percpu.h>
18 #include <linux/signal.h>
19 #include <linux/errno.h>
20 #include <linux/sched.h>
21 #include <linux/types.h>
22 #include <linux/init.h>
23 #include <linux/slab.h>
24 #include <linux/smp.h>
26 #include <asm/cpu_debug.h>
27 #include <asm/paravirt.h>
28 #include <asm/system.h>
29 #include <asm/traps.h>
/* Per-CPU bookkeeping for the debugfs register files. */

/* One slot per register type: holds the per-type debugfs dentry and an
 * 'init' flag set once that type's files exist (see cpu_create_file()). */
static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
/* Private data attached to each created debugfs file; kfree'd in
 * cpu_debug_exit(). */
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
/* CPU_* model-flag word computed by get_cpu_modelflag() at init. */
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
/* Number of priv_arr[] entries in use on this CPU. */
static DEFINE_PER_CPU(int, cpu_priv_count);
/* Packed id word; vendor lives in bits 16+ (code compares '>> 16'
 * against X86_VENDOR_*). */
static DEFINE_PER_CPU(unsigned, cpu_model);

/* Serializes updates to priv_arr/cpu_priv_count and the init flags. */
static DEFINE_MUTEX(cpu_debug_lock);

/* Root of the <debugfs>/cpu hierarchy, created in cpu_debug_init(). */
static struct dentry *cpu_debugfs_dir;
43 static struct cpu_debug_base cpu_base
[] = {
45 { "monitor", CPU_MONITOR
, 0 },
46 { "time", CPU_TIME
, 0 },
47 { "pmc", CPU_PMC
, 1 },
48 { "platform", CPU_PLATFORM
, 0 },
49 { "apic", CPU_APIC
, 0 },
50 { "poweron", CPU_POWERON
, 0 },
51 { "control", CPU_CONTROL
, 0 },
52 { "features", CPU_FEATURES
, 0 },
53 { "lastbranch", CPU_LBRANCH
, 0 },
54 { "bios", CPU_BIOS
, 0 },
55 { "freq", CPU_FREQ
, 0 },
56 { "mtrr", CPU_MTRR
, 0 },
57 { "perf", CPU_PERF
, 0 },
58 { "cache", CPU_CACHE
, 0 },
59 { "sysenter", CPU_SYSENTER
, 0 },
60 { "therm", CPU_THERM
, 0 },
61 { "misc", CPU_MISC
, 0 },
62 { "debug", CPU_DEBUG
, 0 },
63 { "pat", CPU_PAT
, 0 },
64 { "vmx", CPU_VMX
, 0 },
65 { "call", CPU_CALL
, 0 },
66 { "base", CPU_BASE
, 0 },
67 { "ver", CPU_VER
, 0 },
68 { "conf", CPU_CONF
, 0 },
69 { "smm", CPU_SMM
, 0 },
70 { "svm", CPU_SVM
, 0 },
71 { "osvm", CPU_OSVM
, 0 },
72 { "tss", CPU_TSS
, 0 },
75 { "registers", CPU_REG_ALL
, 0 },
78 static struct cpu_file_base cpu_file
[] = {
79 { "index", CPU_REG_ALL
, 0 },
80 { "value", CPU_REG_ALL
, 1 },
83 /* Intel Registers Range */
84 static struct cpu_debug_range cpu_intel_range
[] = {
85 { 0x00000000, 0x00000001, CPU_MC
, CPU_INTEL_ALL
},
86 { 0x00000006, 0x00000007, CPU_MONITOR
, CPU_CX_AT_XE
},
87 { 0x00000010, 0x00000010, CPU_TIME
, CPU_INTEL_ALL
},
88 { 0x00000011, 0x00000013, CPU_PMC
, CPU_INTEL_PENTIUM
},
89 { 0x00000017, 0x00000017, CPU_PLATFORM
, CPU_PX_CX_AT_XE
},
90 { 0x0000001B, 0x0000001B, CPU_APIC
, CPU_P6_CX_AT_XE
},
92 { 0x0000002A, 0x0000002A, CPU_POWERON
, CPU_PX_CX_AT_XE
},
93 { 0x0000002B, 0x0000002B, CPU_POWERON
, CPU_INTEL_XEON
},
94 { 0x0000002C, 0x0000002C, CPU_FREQ
, CPU_INTEL_XEON
},
95 { 0x0000003A, 0x0000003A, CPU_CONTROL
, CPU_CX_AT_XE
},
97 { 0x00000040, 0x00000043, CPU_LBRANCH
, CPU_PM_CX_AT_XE
},
98 { 0x00000044, 0x00000047, CPU_LBRANCH
, CPU_PM_CO_AT
},
99 { 0x00000060, 0x00000063, CPU_LBRANCH
, CPU_C2_AT
},
100 { 0x00000064, 0x00000067, CPU_LBRANCH
, CPU_INTEL_ATOM
},
102 { 0x00000079, 0x00000079, CPU_BIOS
, CPU_P6_CX_AT_XE
},
103 { 0x00000088, 0x0000008A, CPU_CACHE
, CPU_INTEL_P6
},
104 { 0x0000008B, 0x0000008B, CPU_BIOS
, CPU_P6_CX_AT_XE
},
105 { 0x0000009B, 0x0000009B, CPU_MONITOR
, CPU_INTEL_XEON
},
107 { 0x000000C1, 0x000000C2, CPU_PMC
, CPU_P6_CX_AT
},
108 { 0x000000CD, 0x000000CD, CPU_FREQ
, CPU_CX_AT
},
109 { 0x000000E7, 0x000000E8, CPU_PERF
, CPU_CX_AT
},
110 { 0x000000FE, 0x000000FE, CPU_MTRR
, CPU_P6_CX_XE
},
112 { 0x00000116, 0x00000116, CPU_CACHE
, CPU_INTEL_P6
},
113 { 0x00000118, 0x00000118, CPU_CACHE
, CPU_INTEL_P6
},
114 { 0x00000119, 0x00000119, CPU_CACHE
, CPU_INTEL_PX
},
115 { 0x0000011A, 0x0000011B, CPU_CACHE
, CPU_INTEL_P6
},
116 { 0x0000011E, 0x0000011E, CPU_CACHE
, CPU_PX_CX_AT
},
118 { 0x00000174, 0x00000176, CPU_SYSENTER
, CPU_P6_CX_AT_XE
},
119 { 0x00000179, 0x0000017A, CPU_MC
, CPU_PX_CX_AT_XE
},
120 { 0x0000017B, 0x0000017B, CPU_MC
, CPU_P6_XE
},
121 { 0x00000186, 0x00000187, CPU_PMC
, CPU_P6_CX_AT
},
122 { 0x00000198, 0x00000199, CPU_PERF
, CPU_PM_CX_AT_XE
},
123 { 0x0000019A, 0x0000019A, CPU_TIME
, CPU_PM_CX_AT_XE
},
124 { 0x0000019B, 0x0000019D, CPU_THERM
, CPU_PM_CX_AT_XE
},
125 { 0x000001A0, 0x000001A0, CPU_MISC
, CPU_PM_CX_AT_XE
},
127 { 0x000001C9, 0x000001C9, CPU_LBRANCH
, CPU_PM_CX_AT
},
128 { 0x000001D7, 0x000001D8, CPU_LBRANCH
, CPU_INTEL_XEON
},
129 { 0x000001D9, 0x000001D9, CPU_DEBUG
, CPU_CX_AT_XE
},
130 { 0x000001DA, 0x000001DA, CPU_LBRANCH
, CPU_INTEL_XEON
},
131 { 0x000001DB, 0x000001DB, CPU_LBRANCH
, CPU_P6_XE
},
132 { 0x000001DC, 0x000001DC, CPU_LBRANCH
, CPU_INTEL_P6
},
133 { 0x000001DD, 0x000001DE, CPU_LBRANCH
, CPU_PX_CX_AT_XE
},
134 { 0x000001E0, 0x000001E0, CPU_LBRANCH
, CPU_INTEL_P6
},
136 { 0x00000200, 0x0000020F, CPU_MTRR
, CPU_P6_CX_XE
},
137 { 0x00000250, 0x00000250, CPU_MTRR
, CPU_P6_CX_XE
},
138 { 0x00000258, 0x00000259, CPU_MTRR
, CPU_P6_CX_XE
},
139 { 0x00000268, 0x0000026F, CPU_MTRR
, CPU_P6_CX_XE
},
140 { 0x00000277, 0x00000277, CPU_PAT
, CPU_C2_AT_XE
},
141 { 0x000002FF, 0x000002FF, CPU_MTRR
, CPU_P6_CX_XE
},
143 { 0x00000300, 0x00000308, CPU_PMC
, CPU_INTEL_XEON
},
144 { 0x00000309, 0x0000030B, CPU_PMC
, CPU_C2_AT_XE
},
145 { 0x0000030C, 0x00000311, CPU_PMC
, CPU_INTEL_XEON
},
146 { 0x00000345, 0x00000345, CPU_PMC
, CPU_C2_AT
},
147 { 0x00000360, 0x00000371, CPU_PMC
, CPU_INTEL_XEON
},
148 { 0x0000038D, 0x00000390, CPU_PMC
, CPU_C2_AT
},
149 { 0x000003A0, 0x000003BE, CPU_PMC
, CPU_INTEL_XEON
},
150 { 0x000003C0, 0x000003CD, CPU_PMC
, CPU_INTEL_XEON
},
151 { 0x000003E0, 0x000003E1, CPU_PMC
, CPU_INTEL_XEON
},
152 { 0x000003F0, 0x000003F0, CPU_PMC
, CPU_INTEL_XEON
},
153 { 0x000003F1, 0x000003F1, CPU_PMC
, CPU_C2_AT_XE
},
154 { 0x000003F2, 0x000003F2, CPU_PMC
, CPU_INTEL_XEON
},
156 { 0x00000400, 0x00000402, CPU_MC
, CPU_PM_CX_AT_XE
},
157 { 0x00000403, 0x00000403, CPU_MC
, CPU_INTEL_XEON
},
158 { 0x00000404, 0x00000406, CPU_MC
, CPU_PM_CX_AT_XE
},
159 { 0x00000407, 0x00000407, CPU_MC
, CPU_INTEL_XEON
},
160 { 0x00000408, 0x0000040A, CPU_MC
, CPU_PM_CX_AT_XE
},
161 { 0x0000040B, 0x0000040B, CPU_MC
, CPU_INTEL_XEON
},
162 { 0x0000040C, 0x0000040E, CPU_MC
, CPU_PM_CX_XE
},
163 { 0x0000040F, 0x0000040F, CPU_MC
, CPU_INTEL_XEON
},
164 { 0x00000410, 0x00000412, CPU_MC
, CPU_PM_CX_AT_XE
},
165 { 0x00000413, 0x00000417, CPU_MC
, CPU_CX_AT_XE
},
166 { 0x00000480, 0x0000048B, CPU_VMX
, CPU_CX_AT_XE
},
168 { 0x00000600, 0x00000600, CPU_DEBUG
, CPU_PM_CX_AT_XE
},
169 { 0x00000680, 0x0000068F, CPU_LBRANCH
, CPU_INTEL_XEON
},
170 { 0x000006C0, 0x000006CF, CPU_LBRANCH
, CPU_INTEL_XEON
},
172 { 0x000107CC, 0x000107D3, CPU_PMC
, CPU_INTEL_XEON_MP
},
174 { 0xC0000080, 0xC0000080, CPU_FEATURES
, CPU_INTEL_XEON
},
175 { 0xC0000081, 0xC0000082, CPU_CALL
, CPU_INTEL_XEON
},
176 { 0xC0000084, 0xC0000084, CPU_CALL
, CPU_INTEL_XEON
},
177 { 0xC0000100, 0xC0000102, CPU_BASE
, CPU_INTEL_XEON
},
180 /* AMD Registers Range */
181 static struct cpu_debug_range cpu_amd_range
[] = {
182 { 0x00000000, 0x00000001, CPU_MC
, CPU_K10_PLUS
, },
183 { 0x00000010, 0x00000010, CPU_TIME
, CPU_K8_PLUS
, },
184 { 0x0000001B, 0x0000001B, CPU_APIC
, CPU_K8_PLUS
, },
185 { 0x0000002A, 0x0000002A, CPU_POWERON
, CPU_K7_PLUS
},
186 { 0x0000008B, 0x0000008B, CPU_VER
, CPU_K8_PLUS
},
187 { 0x000000FE, 0x000000FE, CPU_MTRR
, CPU_K8_PLUS
, },
189 { 0x00000174, 0x00000176, CPU_SYSENTER
, CPU_K8_PLUS
, },
190 { 0x00000179, 0x0000017B, CPU_MC
, CPU_K8_PLUS
, },
191 { 0x000001D9, 0x000001D9, CPU_DEBUG
, CPU_K8_PLUS
, },
192 { 0x000001DB, 0x000001DE, CPU_LBRANCH
, CPU_K8_PLUS
, },
194 { 0x00000200, 0x0000020F, CPU_MTRR
, CPU_K8_PLUS
, },
195 { 0x00000250, 0x00000250, CPU_MTRR
, CPU_K8_PLUS
, },
196 { 0x00000258, 0x00000259, CPU_MTRR
, CPU_K8_PLUS
, },
197 { 0x00000268, 0x0000026F, CPU_MTRR
, CPU_K8_PLUS
, },
198 { 0x00000277, 0x00000277, CPU_PAT
, CPU_K8_PLUS
, },
199 { 0x000002FF, 0x000002FF, CPU_MTRR
, CPU_K8_PLUS
, },
201 { 0x00000400, 0x00000413, CPU_MC
, CPU_K8_PLUS
, },
203 { 0xC0000080, 0xC0000080, CPU_FEATURES
, CPU_AMD_ALL
, },
204 { 0xC0000081, 0xC0000084, CPU_CALL
, CPU_K8_PLUS
, },
205 { 0xC0000100, 0xC0000102, CPU_BASE
, CPU_K8_PLUS
, },
206 { 0xC0000103, 0xC0000103, CPU_TIME
, CPU_K10_PLUS
, },
208 { 0xC0010000, 0xC0010007, CPU_PMC
, CPU_K8_PLUS
, },
209 { 0xC0010010, 0xC0010010, CPU_CONF
, CPU_K7_PLUS
, },
210 { 0xC0010015, 0xC0010015, CPU_CONF
, CPU_K7_PLUS
, },
211 { 0xC0010016, 0xC001001A, CPU_MTRR
, CPU_K8_PLUS
, },
212 { 0xC001001D, 0xC001001D, CPU_MTRR
, CPU_K8_PLUS
, },
213 { 0xC001001F, 0xC001001F, CPU_CONF
, CPU_K8_PLUS
, },
214 { 0xC0010030, 0xC0010035, CPU_BIOS
, CPU_K8_PLUS
, },
215 { 0xC0010044, 0xC0010048, CPU_MC
, CPU_K8_PLUS
, },
216 { 0xC0010050, 0xC0010056, CPU_SMM
, CPU_K0F_PLUS
, },
217 { 0xC0010058, 0xC0010058, CPU_CONF
, CPU_K10_PLUS
, },
218 { 0xC0010060, 0xC0010060, CPU_CACHE
, CPU_AMD_11
, },
219 { 0xC0010061, 0xC0010068, CPU_SMM
, CPU_K10_PLUS
, },
220 { 0xC0010069, 0xC001006B, CPU_SMM
, CPU_AMD_11
, },
221 { 0xC0010070, 0xC0010071, CPU_SMM
, CPU_K10_PLUS
, },
222 { 0xC0010111, 0xC0010113, CPU_SMM
, CPU_K8_PLUS
, },
223 { 0xC0010114, 0xC0010118, CPU_SVM
, CPU_K10_PLUS
, },
224 { 0xC0010140, 0xC0010141, CPU_OSVM
, CPU_K10_PLUS
, },
225 { 0xC0011022, 0xC0011023, CPU_CONF
, CPU_K10_PLUS
, },
/*
 * Map an Intel cpu_model word to a CPU_INTEL_* model-flag bit used to
 * match entries in cpu_intel_range[].
 *
 * NOTE(review): body is incomplete -- the switch/case structure that
 * selects between the surviving flag assignments (and the return) was
 * lost in extraction; restore from the upstream file before building.
 */
static int get_intel_modelflag(unsigned model)
	flag = CPU_INTEL_PENTIUM;
	flag = CPU_INTEL_PENTIUM_M;
	flag = CPU_INTEL_CORE;
	flag = CPU_INTEL_CORE2;
	flag = CPU_INTEL_ATOM;
	flag = CPU_INTEL_XEON_P4;
	flag = CPU_INTEL_XEON_MP;
/*
 * Map an AMD cpu_model word to a CPU_K*_PLUS/CPU_AMD_* model flag;
 * dispatches on the family byte (model >> 8).
 *
 * NOTE(review): body is incomplete -- the case labels, flag
 * assignments and return were lost in extraction; restore from the
 * upstream file before building.
 */
static int get_amd_modelflag(unsigned model)
	switch (model >> 8) {
/*
 * Compute the CPU_* model flag for 'cpu' from its packed cpu_model
 * word: vendor in bits 16+ selects the Intel or AMD decoder.
 *
 * NOTE(review): body is incomplete -- braces, remaining case labels
 * and the return were lost in extraction; restore from upstream.
 */
static int get_cpu_modelflag(unsigned cpu)
	flag = per_cpu(cpu_model, cpu);

	switch (flag >> 16) {
	case X86_VENDOR_INTEL:
		flag = get_intel_modelflag(flag);
		/* presumably an X86_VENDOR_AMD case follows -- confirm */
		flag = get_amd_modelflag(flag & 0xffff);
/*
 * Return the number of MSR-range table entries for this CPU's vendor
 * (cpu_intel_range[] or cpu_amd_range[]).
 *
 * NOTE(review): body is incomplete -- braces, the AMD case label and
 * the return were lost in extraction; restore from upstream.
 */
static int get_cpu_range_count(unsigned cpu)
	switch (per_cpu(cpu_model, cpu) >> 16) {
	case X86_VENDOR_INTEL:
		index = ARRAY_SIZE(cpu_intel_range);
		/* presumably the X86_VENDOR_AMD case -- confirm */
		index = ARRAY_SIZE(cpu_amd_range);
/*
 * Return non-zero when register class 'flag' exists on 'cpu': scans
 * the vendor's range table for an entry whose model mask intersects
 * this CPU's modelflag and whose class mask intersects 'flag'.
 *
 * NOTE(review): body is incomplete -- the early-accept path behind the
 * "Standard Registers" comment, the switch statement, returns and
 * braces were lost in extraction; restore from upstream.
 */
static int is_typeflag_valid(unsigned cpu, unsigned flag)
	unsigned vendor, modelflag;

	/* Standard Registers should be always valid */

	modelflag = per_cpu(cpu_modelflag, cpu);
	vendor = per_cpu(cpu_model, cpu) >> 16;
	index = get_cpu_range_count(cpu);

	for (i = 0; i < index; i++) {
		case X86_VENDOR_INTEL:
			if ((cpu_intel_range[i].model & modelflag) &&
			    (cpu_intel_range[i].flag & flag))
			if ((cpu_amd_range[i].model & modelflag) &&
			    (cpu_amd_range[i].flag & flag))
/*
 * Fetch the [*min, *max] MSR bounds of range-table entry 'index' when
 * it applies to 'cpu' and register class 'flag'; the vendor word of
 * cpu_model selects the Intel or AMD table.  Return value appears to
 * signal whether a range was found (callers test it) -- confirm
 * against upstream.
 *
 * NOTE(review): body is incomplete -- declarations, case labels,
 * braces and the return were lost in extraction; restore from
 * upstream.
 */
static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
			      int index, unsigned flag)
	modelflag = per_cpu(cpu_modelflag, cpu);

	switch (per_cpu(cpu_model, cpu) >> 16) {
	case X86_VENDOR_INTEL:
		if ((cpu_intel_range[index].model & modelflag) &&
		    (cpu_intel_range[index].flag & flag)) {
			*min = cpu_intel_range[index].min;
			*max = cpu_intel_range[index].max;
		if ((cpu_amd_range[index].model & modelflag) &&
		    (cpu_amd_range[index].flag & flag)) {
			*min = cpu_amd_range[index].min;
			*max = cpu_amd_range[index].max;
/* This function can also be called with seq = NULL for printk */
/*
 * Emit one register value: as a single 64-bit hex word for per-register
 * "value" files, or as "reg: high_low" lines otherwise; falls back to
 * printk(KERN_INFO ...) when seq is NULL.
 *
 * NOTE(review): body is incomplete -- declarations, the seq/priv
 * branching and braces were lost in extraction; restore from upstream.
 */
static void print_cpu_data(struct seq_file *seq, unsigned type,
	struct cpu_private *priv;
	val = (val << 32) | low;
	seq_printf(seq, "0x%llx\n", val);
	seq_printf(seq, " %08x: %08x_%08x\n",
	printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
/* This function can also be called with seq = NULL for printk */
/*
 * Dump MSRs of class 'flag' on 'cpu'.  For a per-register file it
 * apparently reads only priv->reg; otherwise it walks every matching
 * range from get_cpu_range() and prints each readable MSR, silently
 * skipping those rdmsr_safe_on_cpu() rejects.
 *
 * NOTE(review): body is incomplete -- declarations, the seq/priv
 * guard, a continuation line and braces were lost in extraction;
 * restore from upstream.
 */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
	unsigned msr, msr_min, msr_max;
	struct cpu_private *priv;
	if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
		print_cpu_data(seq, priv->reg, low, high);

	range = get_cpu_range_count(cpu);

	for (i = 0; i < range; i++) {
		if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))

		for (msr = msr_min; msr <= msr_max; msr++) {
			if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))

			print_cpu_data(seq, msr, low, high);
463 static void print_tss(void *arg
)
465 struct pt_regs
*regs
= task_pt_regs(current
);
466 struct seq_file
*seq
= arg
;
469 seq_printf(seq
, " RAX\t: %016lx\n", regs
->ax
);
470 seq_printf(seq
, " RBX\t: %016lx\n", regs
->bx
);
471 seq_printf(seq
, " RCX\t: %016lx\n", regs
->cx
);
472 seq_printf(seq
, " RDX\t: %016lx\n", regs
->dx
);
474 seq_printf(seq
, " RSI\t: %016lx\n", regs
->si
);
475 seq_printf(seq
, " RDI\t: %016lx\n", regs
->di
);
476 seq_printf(seq
, " RBP\t: %016lx\n", regs
->bp
);
477 seq_printf(seq
, " ESP\t: %016lx\n", regs
->sp
);
480 seq_printf(seq
, " R08\t: %016lx\n", regs
->r8
);
481 seq_printf(seq
, " R09\t: %016lx\n", regs
->r9
);
482 seq_printf(seq
, " R10\t: %016lx\n", regs
->r10
);
483 seq_printf(seq
, " R11\t: %016lx\n", regs
->r11
);
484 seq_printf(seq
, " R12\t: %016lx\n", regs
->r12
);
485 seq_printf(seq
, " R13\t: %016lx\n", regs
->r13
);
486 seq_printf(seq
, " R14\t: %016lx\n", regs
->r14
);
487 seq_printf(seq
, " R15\t: %016lx\n", regs
->r15
);
490 asm("movl %%cs,%0" : "=r" (seg
));
491 seq_printf(seq
, " CS\t: %04x\n", seg
);
492 asm("movl %%ds,%0" : "=r" (seg
));
493 seq_printf(seq
, " DS\t: %04x\n", seg
);
494 seq_printf(seq
, " SS\t: %04lx\n", regs
->ss
& 0xffff);
495 asm("movl %%es,%0" : "=r" (seg
));
496 seq_printf(seq
, " ES\t: %04x\n", seg
);
497 asm("movl %%fs,%0" : "=r" (seg
));
498 seq_printf(seq
, " FS\t: %04x\n", seg
);
499 asm("movl %%gs,%0" : "=r" (seg
));
500 seq_printf(seq
, " GS\t: %04x\n", seg
);
502 seq_printf(seq
, " EFLAGS\t: %016lx\n", regs
->flags
);
504 seq_printf(seq
, " EIP\t: %016lx\n", regs
->ip
);
/*
 * IPI callback: dump control registers of the target CPU into the
 * seq_file 'arg'.  cr8 exists only on x86-64, hence the guard
 * (restored from upstream; it was lost in extraction).
 */
static void print_cr(void *arg)
{
	struct seq_file *seq = arg;

	seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
	seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
	seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
	seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
	seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}
520 static void print_desc_ptr(char *str
, struct seq_file
*seq
, struct desc_ptr dt
)
522 seq_printf(seq
, " %s\t: %016llx\n", str
, (u64
)(dt
.address
| dt
.size
));
525 static void print_dt(void *seq
)
531 store_idt((struct desc_ptr
*)&dt
);
532 print_desc_ptr("IDT", seq
, dt
);
535 store_gdt((struct desc_ptr
*)&dt
);
536 print_desc_ptr("GDT", seq
, dt
);
540 seq_printf(seq
, " LDT\t: %016lx\n", ldt
);
544 seq_printf(seq
, " TR\t: %016lx\n", ldt
);
/*
 * IPI callback: dump debug registers dr0-dr7 of the target CPU into
 * the seq_file 'arg', skipping the architecturally absent db4/db5.
 * Declarations and the get_debugreg() read were lost in extraction and
 * are restored from upstream.
 */
static void print_dr(void *arg)
{
	struct seq_file *seq = arg;
	unsigned long dr;
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;
		get_debugreg(dr, i);
		seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
	}

	seq_printf(seq, "\n MSR\t:\n");
}
/*
 * IPI callback: dump the local APIC registers of the target CPU into
 * the seq_file 'arg'; compiled out when the kernel has no local APIC
 * support.  Only braces/blank lines were lost in extraction.
 */
static void print_apic(void *arg)
{
	struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(seq, " LAPIC\t:\n");
	seq_printf(seq, " ID\t\t: %08x\n",  apic_read(APIC_ID) >> 24);
	seq_printf(seq, " LVR\t\t: %08x\n",  apic_read(APIC_LVR));
	seq_printf(seq, " TASKPRI\t: %08x\n",  apic_read(APIC_TASKPRI));
	seq_printf(seq, " ARBPRI\t\t: %08x\n",  apic_read(APIC_ARBPRI));
	seq_printf(seq, " PROCPRI\t: %08x\n",  apic_read(APIC_PROCPRI));
	seq_printf(seq, " LDR\t\t: %08x\n",  apic_read(APIC_LDR));
	seq_printf(seq, " DFR\t\t: %08x\n",  apic_read(APIC_DFR));
	seq_printf(seq, " SPIV\t\t: %08x\n",  apic_read(APIC_SPIV));
	seq_printf(seq, " ISR\t\t: %08x\n",  apic_read(APIC_ISR));
	seq_printf(seq, " ESR\t\t: %08x\n",  apic_read(APIC_ESR));
	seq_printf(seq, " ICR\t\t: %08x\n",  apic_read(APIC_ICR));
	seq_printf(seq, " ICR2\t\t: %08x\n",  apic_read(APIC_ICR2));
	seq_printf(seq, " LVTT\t\t: %08x\n",  apic_read(APIC_LVTT));
	seq_printf(seq, " LVTTHMR\t: %08x\n",  apic_read(APIC_LVTTHMR));
	seq_printf(seq, " LVTPC\t\t: %08x\n",  apic_read(APIC_LVTPC));
	seq_printf(seq, " LVT0\t\t: %08x\n",  apic_read(APIC_LVT0));
	seq_printf(seq, " LVT1\t\t: %08x\n",  apic_read(APIC_LVT1));
	seq_printf(seq, " LVTERR\t\t: %08x\n",  apic_read(APIC_LVTERR));
	seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
	seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
	seq_printf(seq, " TDCR\t\t: %08x\n",  apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */

	seq_printf(seq, "\n MSR\t:\n");
}
/*
 * seq_file .show: render one debugfs file.  Dispatches on the file's
 * register class: TSS/CR/DT classes run a dump callback on the owning
 * CPU via smp_call_function_single(); the debug and apic classes
 * additionally dump dr*/APIC state for the "index" file before the
 * MSRs; everything else just prints MSRs.
 *
 * NOTE(review): body is incomplete -- the NULL-priv guard, the case
 * labels of the switch, break statements, braces and the return were
 * lost in extraction; restore from upstream.
 */
static int cpu_seq_show(struct seq_file *seq, void *v)
	struct cpu_private *priv = seq->private;

	switch (cpu_base[priv->type].flag) {
		smp_call_function_single(priv->cpu, print_tss, seq, 1);
		smp_call_function_single(priv->cpu, print_cr, seq, 1);
		smp_call_function_single(priv->cpu, print_dt, seq, 1);
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_dr, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_apic, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		/* default: plain MSR dump */
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
	seq_printf(seq, "\n");
633 static void *cpu_seq_start(struct seq_file
*seq
, loff_t
*pos
)
635 if (*pos
== 0) /* One time is enough ;-) */
641 static void *cpu_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
645 return cpu_seq_start(seq
, pos
);
/* seq_file .stop: nothing to release for this one-shot iterator. */
static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}
652 static const struct seq_operations cpu_seq_ops
= {
653 .start
= cpu_seq_start
,
654 .next
= cpu_seq_next
,
655 .stop
= cpu_seq_stop
,
656 .show
= cpu_seq_show
,
659 static int cpu_seq_open(struct inode
*inode
, struct file
*file
)
661 struct cpu_private
*priv
= inode
->i_private
;
662 struct seq_file
*seq
;
665 err
= seq_open(file
, &cpu_seq_ops
);
667 seq
= file
->private_data
;
674 static int write_msr(struct cpu_private
*priv
, u64 val
)
678 high
= (val
>> 32) & 0xffffffff;
679 low
= val
& 0xffffffff;
681 if (!wrmsr_safe_on_cpu(priv
->cpu
, priv
->reg
, low
, high
))
/*
 * Parse the user-supplied string as an unsigned 64-bit value and,
 * for MSR-class files only (type < CPU_TSS_BIT), write it via
 * write_msr().  Returns 0 on success (cpu_write() tests !ret).
 *
 * NOTE(review): body is incomplete -- declarations, the strtoull
 * error check, braces and the final return were lost in extraction;
 * restore from upstream.
 */
static int write_cpu_register(struct cpu_private *priv, const char *buf)
	ret = strict_strtoull(buf, 0, &val);

	/* Supporting only MSRs */
	if (priv->type < CPU_TSS_BIT)
		return write_msr(priv, val);
/*
 * debugfs .write handler: copy the user buffer in, and if both the
 * register class (cpu_base[].write) and the file kind
 * (cpu_file[].write, i.e. the "value" file) are writable, hand it to
 * write_cpu_register().
 *
 * NOTE(review): body is incomplete -- the local buffer declaration,
 * NUL termination, error returns and braces were lost in extraction;
 * restore from upstream.
 */
static ssize_t cpu_write(struct file *file, const char __user *ubuf,
			 size_t count, loff_t *off)
	struct seq_file *seq = file->private_data;
	struct cpu_private *priv = seq->private;

	/* reject unopened state and oversized input */
	if ((priv == NULL) || (count >= sizeof(buf)))

	if (copy_from_user(&buf, ubuf, count))

	if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
		if (!write_cpu_register(priv, buf))
725 static const struct file_operations cpu_fops
= {
726 .owner
= THIS_MODULE
,
727 .open
= cpu_seq_open
,
731 .release
= seq_release
,
/*
 * Allocate a cpu_private describing (cpu, type, reg, file), register
 * it in priv_arr[] under cpu_debug_lock, and create the matching
 * debugfs file: a per-register "index"/"value" file under 'dentry',
 * or the per-type summary file (marking cpu_arr[type].init so it is
 * only created once).
 *
 * NOTE(review): body is incomplete -- the priv field assignments, the
 * kzalloc failure check, the if/else structure around the two
 * debugfs_create_file() calls, returns and braces were lost in
 * extraction; restore from upstream.
 */
static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
			   unsigned file, struct dentry *dentry)
	struct cpu_private *priv = NULL;

	/* Already initialized */
	if (file == CPU_INDEX_BIT)
		if (per_cpu(cpu_arr[type].init, cpu))

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	mutex_lock(&cpu_debug_lock);
	per_cpu(priv_arr[type], cpu) = priv;
	per_cpu(cpu_priv_count, cpu)++;
	mutex_unlock(&cpu_debug_lock);

	debugfs_create_file(cpu_file[file].name, S_IRUGO,
			    dentry, (void *)priv, &cpu_fops);
	debugfs_create_file(cpu_base[type].name, S_IRUGO,
			    per_cpu(cpu_arr[type].dentry, cpu),
			    (void *)priv, &cpu_fops);
	mutex_lock(&cpu_debug_lock);
	per_cpu(cpu_arr[type].init, cpu) = 1;
	mutex_unlock(&cpu_debug_lock);
772 static int cpu_init_regfiles(unsigned cpu
, unsigned int type
, unsigned reg
,
773 struct dentry
*dentry
)
778 for (file
= 0; file
< ARRAY_SIZE(cpu_file
); file
++) {
779 err
= cpu_create_file(cpu
, type
, reg
, file
, dentry
);
787 static int cpu_init_msr(unsigned cpu
, unsigned type
, struct dentry
*dentry
)
789 struct dentry
*cpu_dentry
= NULL
;
790 unsigned reg
, reg_min
, reg_max
;
791 int i
, range
, err
= 0;
795 range
= get_cpu_range_count(cpu
);
797 for (i
= 0; i
< range
; i
++) {
798 if (!get_cpu_range(cpu
, ®_min
, ®_max
, i
,
799 cpu_base
[type
].flag
))
802 for (reg
= reg_min
; reg
<= reg_max
; reg
++) {
803 if (rdmsr_safe_on_cpu(cpu
, reg
, &low
, &high
))
806 sprintf(reg_dir
, "0x%x", reg
);
807 cpu_dentry
= debugfs_create_dir(reg_dir
, dentry
);
808 err
= cpu_init_regfiles(cpu
, type
, reg
, cpu_dentry
);
817 static int cpu_init_allreg(unsigned cpu
, struct dentry
*dentry
)
819 struct dentry
*cpu_dentry
= NULL
;
823 for (type
= 0; type
< ARRAY_SIZE(cpu_base
) - 1; type
++) {
824 if (!is_typeflag_valid(cpu
, cpu_base
[type
].flag
))
826 cpu_dentry
= debugfs_create_dir(cpu_base
[type
].name
, dentry
);
827 per_cpu(cpu_arr
[type
].dentry
, cpu
) = cpu_dentry
;
829 if (type
< CPU_TSS_BIT
)
830 err
= cpu_init_msr(cpu
, type
, cpu_dentry
);
832 err
= cpu_create_file(cpu
, type
, 0, CPU_INDEX_BIT
,
841 static int cpu_init_cpu(void)
843 struct dentry
*cpu_dentry
= NULL
;
844 struct cpuinfo_x86
*cpui
;
849 for (cpu
= 0; cpu
< nr_cpu_ids
; cpu
++) {
850 cpui
= &cpu_data(cpu
);
851 if (!cpu_has(cpui
, X86_FEATURE_MSR
))
853 per_cpu(cpu_model
, cpu
) = ((cpui
->x86_vendor
<< 16) |
856 per_cpu(cpu_modelflag
, cpu
) = get_cpu_modelflag(cpu
);
858 sprintf(cpu_dir
, "cpu%d", cpu
);
859 cpu_dentry
= debugfs_create_dir(cpu_dir
, cpu_debugfs_dir
);
860 err
= cpu_init_allreg(cpu
, cpu_dentry
);
862 pr_info("cpu%d(%d) debug files %d\n",
863 cpu
, nr_cpu_ids
, per_cpu(cpu_priv_count
, cpu
));
864 if (per_cpu(cpu_priv_count
, cpu
) > MAX_CPU_FILES
) {
865 pr_err("Register files count %d exceeds limit %d\n",
866 per_cpu(cpu_priv_count
, cpu
), MAX_CPU_FILES
);
867 per_cpu(cpu_priv_count
, cpu
) = MAX_CPU_FILES
;
877 static int __init
cpu_debug_init(void)
879 cpu_debugfs_dir
= debugfs_create_dir("cpu", arch_debugfs_dir
);
881 return cpu_init_cpu();
884 static void __exit
cpu_debug_exit(void)
889 debugfs_remove_recursive(cpu_debugfs_dir
);
891 for (cpu
= 0; cpu
< nr_cpu_ids
; cpu
++)
892 for (i
= 0; i
< per_cpu(cpu_priv_count
, cpu
); i
++)
893 kfree(per_cpu(priv_arr
[i
], cpu
));
/* Standard module entry/exit wiring and metadata. */
module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");