coverity appeasement - redundant check
[minix.git] / kernel / arch / i386 / arch_watchdog.c
blob b6884e4d40887e987b90af34956707b61a4da8d3
#include "kernel/kernel.h"
#include "kernel/watchdog.h"
#include "arch_proto.h"
#include "glo.h"
#include <minix/minlib.h>
#include <minix/u64.h>

#include "apic.h"

#define CPUID_UNHALTED_CORE_CYCLES_AVAILABLE	0

#define INTEL_MSR_PERFMON_CRT0		0xc1
#define INTEL_MSR_PERFMON_SEL0		0x186

#define INTEL_MSR_PERFMON_SEL0_ENABLE	(1 << 22)

/*
 * Intel architecture performance counters watchdog
 */

static struct arch_watchdog intel_arch_watchdog;
static struct arch_watchdog amd_watchdog;

static void intel_arch_watchdog_init(const unsigned cpu)
{
	u64_t cpuf;
	u32_t val;

	/* clear the counter before programming the event */
	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, 0);

	/*
	 * Int (bit 20, APIC interrupt on overflow), OS (bit 17), USR (bit 16),
	 * event 0x3c = unhalted core cycles
	 */
	val = 1 << 20 | 1 << 17 | 1 << 16 | 0x3c;
	ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0, val);

	/*
	 * Should give us a tick approx. every 0.5-1s; the perf counter has
	 * only the lowest 31 bits writable :(
	 */
	cpuf = cpu_get_freq(cpu);
	while (ex64hi(cpuf) || ex64lo(cpuf) > 0x7fffffffU)
		cpuf = div64u64(cpuf, 2);
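	/*
	 * The counter counts up and raises the (NMI) interrupt on overflow,
	 * so it is loaded with the negated tick period.
	 */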
	cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
	watchdog->resetval = watchdog->watchdog_resetval = cpuf;

	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(cpuf));

	ia32_msr_write(INTEL_MSR_PERFMON_SEL0, 0,
			val | INTEL_MSR_PERFMON_SEL0_ENABLE);

	/* unmask the performance counter interrupt */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
}

static void intel_arch_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(INTEL_MSR_PERFMON_CRT0, 0, ex64lo(watchdog->resetval));
}

int arch_watchdog_init(void)
{
	u32_t eax, ebx, ecx, edx;
	unsigned cpu = cpuid;

	if (!lapic_addr) {
		printf("ERROR : Cannot use NMI watchdog if APIC is not enabled\n");
		return -1;
	}

	if (cpu_info[cpu].vendor == CPU_VENDOR_INTEL) {
		eax = 0xA;

		_cpuid(&eax, &ebx, &ecx, &edx);
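
		/*
		 * CPUID leaf 0xA (architectural performance monitoring):
		 * EAX[15:8] is the number of general-purpose counters per
		 * logical CPU; EBX bit 0 set means the unhalted core cycles
		 * event is not available.
		 */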
		/* FIXME currently we support only a watchdog based on the Intel
		 * architectural performance counters. Some Intel CPUs don't have
		 * this feature
		 */
		if (ebx & (1 << CPUID_UNHALTED_CORE_CYCLES_AVAILABLE))
			return -1;
		if (((eax >> 8) & 0xff) == 0)
			return -1;

		watchdog = &intel_arch_watchdog;
	} else if (cpu_info[cpu].vendor == CPU_VENDOR_AMD) {
		if (cpu_info[cpu].family != 6 &&
				cpu_info[cpu].family != 15 &&
				cpu_info[cpu].family != 16 &&
				cpu_info[cpu].family != 17)
			return -1;
		else
			watchdog = &amd_watchdog;
	} else
		return -1;

	/* Set up the performance counter (PC) overflow as NMI for the
	 * watchdog; it is masked for now */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_INT_MASK | APIC_ICR_DM_NMI);
	(void) lapic_read(LAPIC_LVTPCR);

	/* double check if LAPIC is enabled */
	if (lapic_addr && watchdog->init) {
		watchdog->init(cpuid);
	}

	return 0;
}

void arch_watchdog_stop(void)
{
}

void arch_watchdog_lockup(const struct nmi_frame * frame)
{
	printf("KERNEL LOCK UP\n"
			"eax    0x%08x\n"
			"ecx    0x%08x\n"
			"edx    0x%08x\n"
			"ebx    0x%08x\n"
			"ebp    0x%08x\n"
			"esi    0x%08x\n"
			"edi    0x%08x\n"
			"gs     0x%08x\n"
			"fs     0x%08x\n"
			"es     0x%08x\n"
			"ds     0x%08x\n"
			"pc     0x%08x\n"
			"cs     0x%08x\n"
			"eflags 0x%08x\n",
			frame->eax,
			frame->ecx,
			frame->edx,
			frame->ebx,
			frame->ebp,
			frame->esi,
			frame->edi,
			frame->gs,
			frame->fs,
			frame->es,
			frame->ds,
			frame->pc,
			frame->cs,
			frame->eflags
			);

	panic("Kernel lockup");
}

int i386_watchdog_start(void)
{
	if (arch_watchdog_init()) {
		printf("WARNING watchdog initialization "
				"failed! Disabled\n");
		watchdog_enabled = 0;
		return -1;
	}
	else
		BOOT_VERBOSE(printf("Watchdog enabled\n"););

	return 0;
}

static int intel_arch_watchdog_profile_init(const unsigned freq)
{
	u64_t cpuf;

	/* FIXME works only if all CPUs have the same freq */
	cpuf = cpu_get_freq(cpuid);
	cpuf = div64u64(cpuf, freq);
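	/* cpuf now holds the number of CPU cycles between two profiling NMIs */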

	/*
	 * If freq is too low and the CPU frequency too high, we may end up
	 * with a value that cannot be handled by the 31-bit CPU perf counter.
	 */
	if (ex64hi(cpuf) != 0 || ex64lo(cpuf) > 0x7fffffffU) {
		printf("ERROR : nmi watchdog ticks exceed 31bits, use higher frequency\n");
		return EINVAL;
	}

	cpuf = make64(-ex64lo(cpuf), ex64hi(cpuf));
	watchdog->profile_resetval = cpuf;

	return OK;
}

static struct arch_watchdog intel_arch_watchdog = {
	/*.init = */		intel_arch_watchdog_init,
	/*.reinit = */		intel_arch_watchdog_reinit,
	/*.profile_init = */	intel_arch_watchdog_profile_init
};

#define AMD_MSR_EVENT_SEL0		0xc0010000
#define AMD_MSR_EVENT_CTR0		0xc0010004
#define AMD_MSR_EVENT_SEL0_ENABLE	(1 << 22)

static void amd_watchdog_init(const unsigned cpu)
{
	u64_t cpuf;
	u32_t val;

	ia32_msr_write(AMD_MSR_EVENT_CTR0, 0, 0);

	/* Int, OS, USR, event 0x76 = cycles the CPU is running (not halted) */
	val = 1 << 20 | 1 << 17 | 1 << 16 | 0x76;
	ia32_msr_write(AMD_MSR_EVENT_SEL0, 0, val);
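
	/*
	 * As in the Intel case above, the counter counts up and the NMI fires
	 * on overflow, hence the negated reset value.
	 */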
	cpuf = cpu_get_freq(cpu);
	neg64(cpuf);
	watchdog->resetval = watchdog->watchdog_resetval = cpuf;

	ia32_msr_write(AMD_MSR_EVENT_CTR0,
			ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));

	ia32_msr_write(AMD_MSR_EVENT_SEL0, 0,
			val | AMD_MSR_EVENT_SEL0_ENABLE);

	/* unmask the performance counter interrupt */
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
}

static void amd_watchdog_reinit(const unsigned cpu)
{
	lapic_write(LAPIC_LVTPCR, APIC_ICR_DM_NMI);
	ia32_msr_write(AMD_MSR_EVENT_CTR0,
			ex64hi(watchdog->resetval), ex64lo(watchdog->resetval));
}

static int amd_watchdog_profile_init(const unsigned freq)
{
	u64_t cpuf;

	/* FIXME works only if all CPUs have the same freq */
	cpuf = cpu_get_freq(cpuid);
	cpuf = div64u64(cpuf, freq);

	neg64(cpuf);
	watchdog->profile_resetval = cpuf;

	return OK;
}

static struct arch_watchdog amd_watchdog = {
	/*.init = */		amd_watchdog_init,
	/*.reinit = */		amd_watchdog_reinit,
	/*.profile_init = */	amd_watchdog_profile_init
};