/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/cpu_debug.h>
#include <asm/paravirt.h>
#include <asm/system.h>
#include <asm/traps.h>
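
/*
 * Per-cpu bookkeeping (the types come from <asm/cpu_debug.h>): cpu_arr
 * holds the debugfs dentry and init flag for each register group, priv_arr
 * keeps the cpu_private structures handed to debugfs so cpu_debug_exit()
 * can free them, and cpu_priv_count counts how many files each CPU got.
 */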

static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
static DEFINE_PER_CPU(int, cpu_priv_count);

static DEFINE_MUTEX(cpu_debug_lock);

static struct dentry *cpu_debugfs_dir;

static struct cpu_debug_base cpu_base[] = {
        { "mc",         CPU_MC,         0 },
        { "monitor",    CPU_MONITOR,    0 },
        { "time",       CPU_TIME,       0 },
        { "pmc",        CPU_PMC,        1 },
        { "platform",   CPU_PLATFORM,   0 },
        { "apic",       CPU_APIC,       0 },
        { "poweron",    CPU_POWERON,    0 },
        { "control",    CPU_CONTROL,    0 },
        { "features",   CPU_FEATURES,   0 },
        { "lastbranch", CPU_LBRANCH,    0 },
        { "bios",       CPU_BIOS,       0 },
        { "freq",       CPU_FREQ,       0 },
        { "mtrr",       CPU_MTRR,       0 },
        { "perf",       CPU_PERF,       0 },
        { "cache",      CPU_CACHE,      0 },
        { "sysenter",   CPU_SYSENTER,   0 },
        { "therm",      CPU_THERM,      0 },
        { "misc",       CPU_MISC,       0 },
        { "debug",      CPU_DEBUG,      0 },
        { "pat",        CPU_PAT,        0 },
        { "vmx",        CPU_VMX,        0 },
        { "call",       CPU_CALL,       0 },
        { "base",       CPU_BASE,       0 },
        { "ver",        CPU_VER,        0 },
        { "conf",       CPU_CONF,       0 },
        { "smm",        CPU_SMM,        0 },
        { "svm",        CPU_SVM,        0 },
        { "osvm",       CPU_OSVM,       0 },
        { "tss",        CPU_TSS,        0 },
        { "cr",         CPU_CR,         0 },
        { "dt",         CPU_DT,         0 },
        { "registers",  CPU_REG_ALL,    0 },
};

static struct cpu_file_base cpu_file[] = {
        { "index",      CPU_REG_ALL,    0 },
        { "value",      CPU_REG_ALL,    1 },
};
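
/*
 * A register group gets one summary file named after the group, plus a
 * "value" file per MSR.  Writes are honoured only when both the group
 * (cpu_base[].write) and the file type (cpu_file[].write) are flagged
 * writable, which with the tables above means only the pmc "value" files;
 * see cpu_write().
 */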

/* CPU Registers Range */
static struct cpu_debug_range cpu_reg_range[] = {
        { 0x00000000, 0x00000001, CPU_MC,       },
        { 0x00000006, 0x00000007, CPU_MONITOR,  },
        { 0x00000010, 0x00000010, CPU_TIME,     },
        { 0x00000011, 0x00000013, CPU_PMC,      },
        { 0x00000017, 0x00000017, CPU_PLATFORM, },
        { 0x0000001B, 0x0000001B, CPU_APIC,     },
        { 0x0000002A, 0x0000002B, CPU_POWERON,  },
        { 0x0000002C, 0x0000002C, CPU_FREQ,     },
        { 0x0000003A, 0x0000003A, CPU_CONTROL,  },
        { 0x00000040, 0x00000047, CPU_LBRANCH,  },
        { 0x00000060, 0x00000067, CPU_LBRANCH,  },
        { 0x00000079, 0x00000079, CPU_BIOS,     },
        { 0x00000088, 0x0000008A, CPU_CACHE,    },
        { 0x0000008B, 0x0000008B, CPU_BIOS,     },
        { 0x0000009B, 0x0000009B, CPU_MONITOR,  },
        { 0x000000C1, 0x000000C4, CPU_PMC,      },
        { 0x000000CD, 0x000000CD, CPU_FREQ,     },
        { 0x000000E7, 0x000000E8, CPU_PERF,     },
        { 0x000000FE, 0x000000FE, CPU_MTRR,     },

        { 0x00000116, 0x0000011E, CPU_CACHE,    },
        { 0x00000174, 0x00000176, CPU_SYSENTER, },
        { 0x00000179, 0x0000017B, CPU_MC,       },
        { 0x00000186, 0x00000189, CPU_PMC,      },
        { 0x00000198, 0x00000199, CPU_PERF,     },
        { 0x0000019A, 0x0000019A, CPU_TIME,     },
        { 0x0000019B, 0x0000019D, CPU_THERM,    },
        { 0x000001A0, 0x000001A0, CPU_MISC,     },
        { 0x000001C9, 0x000001C9, CPU_LBRANCH,  },
        { 0x000001D7, 0x000001D8, CPU_LBRANCH,  },
        { 0x000001D9, 0x000001D9, CPU_DEBUG,    },
        { 0x000001DA, 0x000001E0, CPU_LBRANCH,  },

        { 0x00000200, 0x0000020F, CPU_MTRR,     },
        { 0x00000250, 0x00000250, CPU_MTRR,     },
        { 0x00000258, 0x00000259, CPU_MTRR,     },
        { 0x00000268, 0x0000026F, CPU_MTRR,     },
        { 0x00000277, 0x00000277, CPU_PAT,      },
        { 0x000002FF, 0x000002FF, CPU_MTRR,     },

        { 0x00000300, 0x00000311, CPU_PMC,      },
        { 0x00000345, 0x00000345, CPU_PMC,      },
        { 0x00000360, 0x00000371, CPU_PMC,      },
        { 0x0000038D, 0x00000390, CPU_PMC,      },
        { 0x000003A0, 0x000003BE, CPU_PMC,      },
        { 0x000003C0, 0x000003CD, CPU_PMC,      },
        { 0x000003E0, 0x000003E1, CPU_PMC,      },
        { 0x000003F0, 0x000003F2, CPU_PMC,      },

        { 0x00000400, 0x00000417, CPU_MC,       },
        { 0x00000480, 0x0000048B, CPU_VMX,      },

        { 0x00000600, 0x00000600, CPU_DEBUG,    },
        { 0x00000680, 0x0000068F, CPU_LBRANCH,  },
        { 0x000006C0, 0x000006CF, CPU_LBRANCH,  },

        { 0x000107CC, 0x000107D3, CPU_PMC,      },

        { 0xC0000080, 0xC0000080, CPU_FEATURES, },
        { 0xC0000081, 0xC0000084, CPU_CALL,     },
        { 0xC0000100, 0xC0000102, CPU_BASE,     },
        { 0xC0000103, 0xC0000103, CPU_TIME,     },

        { 0xC0010000, 0xC0010007, CPU_PMC,      },
        { 0xC0010010, 0xC0010010, CPU_CONF,     },
        { 0xC0010015, 0xC0010015, CPU_CONF,     },
        { 0xC0010016, 0xC001001A, CPU_MTRR,     },
        { 0xC001001D, 0xC001001D, CPU_MTRR,     },
        { 0xC001001F, 0xC001001F, CPU_CONF,     },
        { 0xC0010030, 0xC0010035, CPU_BIOS,     },
        { 0xC0010044, 0xC0010048, CPU_MC,       },
        { 0xC0010050, 0xC0010056, CPU_SMM,      },
        { 0xC0010058, 0xC0010058, CPU_CONF,     },
        { 0xC0010060, 0xC0010060, CPU_CACHE,    },
        { 0xC0010061, 0xC0010068, CPU_SMM,      },
        { 0xC0010069, 0xC001006B, CPU_SMM,      },
        { 0xC0010070, 0xC0010071, CPU_SMM,      },
        { 0xC0010111, 0xC0010113, CPU_SMM,      },
        { 0xC0010114, 0xC0010118, CPU_SVM,      },
        { 0xC0010140, 0xC0010141, CPU_OSVM,     },
        { 0xC0011022, 0xC0011023, CPU_CONF,     },
};

static int is_typeflag_valid(unsigned cpu, unsigned flag)
{
        int i;

        /* Standard Registers should be always valid */
        if (flag >= CPU_TSS)
                return 1;

        for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
                if (cpu_reg_range[i].flag == flag)
                        return 1;
        }

        /* Invalid */
        return 0;
}
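
/*
 * get_cpu_range() copies the i-th MSR range into *min/*max when its flag
 * matches and returns *max; a non-matching entry therefore reads back as 0
 * and the callers simply skip it.
 */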

static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
                              int index, unsigned flag)
{
        if (cpu_reg_range[index].flag == flag) {
                *min = cpu_reg_range[index].min;
                *max = cpu_reg_range[index].max;
        } else
                *max = 0;

        return *max;
}

/* This function can also be called with seq = NULL for printk */
static void print_cpu_data(struct seq_file *seq, unsigned type,
                           u32 low, u32 high)
{
        struct cpu_private *priv;
        u64 val = high;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        val = (val << 32) | low;
                        seq_printf(seq, "0x%llx\n", val);
                } else
                        seq_printf(seq, " %08x: %08x_%08x\n",
                                   type, high, low);
        } else
                printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
}

/* This function can also be called with seq = NULL for printk */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
        unsigned msr, msr_min, msr_max;
        struct cpu_private *priv;
        u32 low, high;
        int i;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
                                               &low, &high))
                                print_cpu_data(seq, priv->reg, low, high);
                        return;
                }
        }

        for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
                if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
                        continue;

                for (msr = msr_min; msr <= msr_max; msr++) {
                        if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
                                continue;

                        print_cpu_data(seq, msr, low, high);
                }
        }
}

static void print_tss(void *arg)
{
        struct pt_regs *regs = task_pt_regs(current);
        struct seq_file *seq = arg;
        unsigned int seg;

        seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
        seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
        seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
        seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

        seq_printf(seq, " RSI\t: %016lx\n", regs->si);
        seq_printf(seq, " RDI\t: %016lx\n", regs->di);
        seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
        seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
        seq_printf(seq, " R08\t: %016lx\n", regs->r8);
        seq_printf(seq, " R09\t: %016lx\n", regs->r9);
        seq_printf(seq, " R10\t: %016lx\n", regs->r10);
        seq_printf(seq, " R11\t: %016lx\n", regs->r11);
        seq_printf(seq, " R12\t: %016lx\n", regs->r12);
        seq_printf(seq, " R13\t: %016lx\n", regs->r13);
        seq_printf(seq, " R14\t: %016lx\n", regs->r14);
        seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

        asm("movl %%cs,%0" : "=r" (seg));
        seq_printf(seq, " CS\t: %04x\n", seg);
        asm("movl %%ds,%0" : "=r" (seg));
        seq_printf(seq, " DS\t: %04x\n", seg);
        seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
        asm("movl %%es,%0" : "=r" (seg));
        seq_printf(seq, " ES\t: %04x\n", seg);
        asm("movl %%fs,%0" : "=r" (seg));
        seq_printf(seq, " FS\t: %04x\n", seg);
        asm("movl %%gs,%0" : "=r" (seg));
        seq_printf(seq, " GS\t: %04x\n", seg);

        seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);

        seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}

static void print_cr(void *arg)
{
        struct seq_file *seq = arg;

        seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
        seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
        seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
        seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
        seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}

static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
        seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}

static void print_dt(void *seq)
{
        unsigned long ldt;
        struct desc_ptr dt;

        /* IDT */
        store_idt((struct desc_ptr *)&dt);
        print_desc_ptr("IDT", seq, dt);

        /* GDT */
        store_gdt((struct desc_ptr *)&dt);
        print_desc_ptr("GDT", seq, dt);

        /* LDT */
        store_ldt(ldt);
        seq_printf(seq, " LDT\t: %016lx\n", ldt);

        /* TR */
        store_tr(ldt);
        seq_printf(seq, " TR\t: %016lx\n", ldt);
}

static void print_dr(void *arg)
{
        struct seq_file *seq = arg;
        unsigned long dr;
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5 */
                if ((i == 4) || (i == 5))
                        continue;

                get_debugreg(dr, i);
                seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
        }

        seq_printf(seq, "\n MSR\t:\n");
}

static void print_apic(void *arg)
{
        struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(seq, " LAPIC\t:\n");
        seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
        seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
        seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
        seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
        seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
        seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
        seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
        seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
        seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
        seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
        seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
        seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
        seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
        seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
        seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
        seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
        seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
        seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
        seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
        seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
        seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
        if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
                unsigned int i, v, maxeilvt;

                v = apic_read(APIC_EFEAT);
                maxeilvt = (v >> 16) & 0xff;
                seq_printf(seq, " EFEAT\t\t: %08x\n", v);
                seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));

                for (i = 0; i < maxeilvt; i++) {
                        v = apic_read(APIC_EILVTn(i));
                        seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
                }
        }
#endif /* CONFIG_X86_LOCAL_APIC */
        seq_printf(seq, "\n MSR\t:\n");
}

static int cpu_seq_show(struct seq_file *seq, void *v)
{
        struct cpu_private *priv = seq->private;

        if (priv == NULL)
                return -EINVAL;

        switch (cpu_base[priv->type].flag) {
        case CPU_TSS:
                smp_call_function_single(priv->cpu, print_tss, seq, 1);
                break;
        case CPU_CR:
                smp_call_function_single(priv->cpu, print_cr, seq, 1);
                break;
        case CPU_DT:
                smp_call_function_single(priv->cpu, print_dt, seq, 1);
                break;
        case CPU_DEBUG:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_dr, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        case CPU_APIC:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_apic, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;

        default:
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        }
        seq_printf(seq, "\n");

        return 0;
}
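
/*
 * Every debugfs file is a single-record seq_file: cpu_seq_start() hands
 * back the seq_file itself only at position 0 and NULL afterwards, so
 * cpu_seq_show() runs exactly once per read.
 */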

static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) /* One time is enough ;-) */
                return seq;

        return NULL;
}

static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;

        return cpu_seq_start(seq, pos);
}

static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations cpu_seq_ops = {
        .start          = cpu_seq_start,
        .next           = cpu_seq_next,
        .stop           = cpu_seq_stop,
        .show           = cpu_seq_show,
};

static int cpu_seq_open(struct inode *inode, struct file *file)
{
        struct cpu_private *priv = inode->i_private;
        struct seq_file *seq;
        int err;

        err = seq_open(file, &cpu_seq_ops);
        if (!err) {
                seq = file->private_data;
                seq->private = priv;
        }

        return err;
}

static int write_msr(struct cpu_private *priv, u64 val)
{
        u32 low, high;

        high = (val >> 32) & 0xffffffff;
        low = val & 0xffffffff;

        if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
                return 0;

        return -EPERM;
}

static int write_cpu_register(struct cpu_private *priv, const char *buf)
{
        int ret = -EPERM;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;

        /* Supporting only MSRs */
        if (priv->type < CPU_TSS_BIT)
                return write_msr(priv, val);

        return ret;
}

static ssize_t cpu_write(struct file *file, const char __user *ubuf,
                         size_t count, loff_t *off)
{
        struct seq_file *seq = file->private_data;
        struct cpu_private *priv = seq->private;
        char buf[19];

        if ((priv == NULL) || (count >= sizeof(buf)))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
                if (!write_cpu_register(priv, buf))
                        return count;

        return -EACCES;
}

static const struct file_operations cpu_fops = {
        .owner          = THIS_MODULE,
        .open           = cpu_seq_open,
        .read           = seq_read,
        .write          = cpu_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                           unsigned file, struct dentry *dentry)
{
        struct cpu_private *priv = NULL;

        /* Already initialized */
        if (file == CPU_INDEX_BIT)
                if (per_cpu(cpu_arr[type].init, cpu))
                        return 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;

        priv->cpu = cpu;
        priv->type = type;
        priv->reg = reg;
        priv->file = file;
        mutex_lock(&cpu_debug_lock);
        per_cpu(priv_arr[type], cpu) = priv;
        per_cpu(cpu_priv_count, cpu)++;
        mutex_unlock(&cpu_debug_lock);

        if (file)
                debugfs_create_file(cpu_file[file].name, S_IRUGO,
                                    dentry, (void *)priv, &cpu_fops);
        else {
                debugfs_create_file(cpu_base[type].name, S_IRUGO,
                                    per_cpu(cpu_arr[type].dentry, cpu),
                                    (void *)priv, &cpu_fops);
                mutex_lock(&cpu_debug_lock);
                per_cpu(cpu_arr[type].init, cpu) = 1;
                mutex_unlock(&cpu_debug_lock);
        }

        return 0;
}

static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
                             struct dentry *dentry)
{
        unsigned file;
        int err = 0;

        for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
                err = cpu_create_file(cpu, type, reg, file, dentry);
                if (err)
                        return err;
        }

        return err;
}

static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned reg, reg_min, reg_max;
        int i, err = 0;
        char reg_dir[12];
        u32 low, high;

        for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
                if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
                                   cpu_base[type].flag))
                        continue;

                for (reg = reg_min; reg <= reg_max; reg++) {
                        if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
                                continue;

                        sprintf(reg_dir, "0x%x", reg);
                        cpu_dentry = debugfs_create_dir(reg_dir, dentry);
                        err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
                        if (err)
                                return err;
                }
        }

        return err;
}

static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned type;
        int err = 0;

        for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
                if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                        continue;
                cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
                per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;

                if (type < CPU_TSS_BIT)
                        err = cpu_init_msr(cpu, type, cpu_dentry);
                else
                        err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
                                              cpu_dentry);
                if (err)
                        return err;
        }

        return err;
}
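
/*
 * cpu_init_cpu() below builds the debugfs tree for every CPU that
 * advertises X86_FEATURE_MSR.  Assuming debugfs is mounted at the usual
 * /sys/kernel/debug, the layout is roughly:
 *
 *   /sys/kernel/debug/x86/cpu/cpu0/apic/apic        - dump of the whole group
 *   /sys/kernel/debug/x86/cpu/cpu0/apic/0x1b/value  - one MSR, readable with
 *                                                     "cat .../0x1b/value"
 *   /sys/kernel/debug/x86/cpu/cpu0/tss/tss          - non-MSR register groups
 *
 * MSR reads go through rdmsr_safe_on_cpu(), so MSRs that fault on a given
 * CPU are simply skipped.
 */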

static int cpu_init_cpu(void)
{
        struct dentry *cpu_dentry = NULL;
        struct cpuinfo_x86 *cpui;
        char cpu_dir[12];
        unsigned cpu;
        int err = 0;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                cpui = &cpu_data(cpu);
                if (!cpu_has(cpui, X86_FEATURE_MSR))
                        continue;

                sprintf(cpu_dir, "cpu%d", cpu);
                cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
                err = cpu_init_allreg(cpu, cpu_dentry);

                pr_info("cpu%d(%d) debug files %d\n",
                        cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
                if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
                        pr_err("Register files count %d exceeds limit %d\n",
                                per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
                        per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
                        err = -ENFILE;
                }
                if (err)
                        return err;
        }

        return err;
}

static int __init cpu_debug_init(void)
{
        cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

        return cpu_init_cpu();
}

static void __exit cpu_debug_exit(void)
{
        int i, cpu;

        if (cpu_debugfs_dir)
                debugfs_remove_recursive(cpu_debugfs_dir);

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
                        kfree(per_cpu(priv_arr[i], cpu));
}

module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");