// btrfs: Attempt to fix GCC2 build.
// [haiku.git] / src / system / kernel / arch / x86 / 64 / descriptors.cpp
// blob 12f51c64e2da1b2368c1679a46c12bd6f5e44663
/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */
#include <arch/x86/descriptors.h>

#include <boot/kernel_args.h>
#include <cpu.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>

#include <arch/int.h>
#include <arch/user_debugger.h>
// Compile-time table generator (C++11 substitute for
// std::make_index_sequence): the primary template recursively prepends
// N - 1 to the index pack until N reaches 0, at which point the
// specialization below holds the full pack 0..N-1 and materializes the
// array by calling Function for every index.
template<typename T, T (*Function)(unsigned), unsigned N, unsigned ...Index>
struct GenerateTable : GenerateTable<T, Function, N - 1, N - 1, Index...> {
};

// Base case: all indices generated; build the actual table.
template<typename T, T (*Function)(unsigned), unsigned ...Index>
struct GenerateTable<T, Function, 0, Index...> {
	GenerateTable()
		:
		fTable { Function(Index)... }
	{
	}

	T	fTable[sizeof...(Index)];
};
// x86 segment descriptor type-field values used by this file. The low
// "accessed"/"busy" bit is left clear; TSS (0x9) is the 64-bit available
// TSS system type.
enum class DescriptorType : unsigned {
	DataWritable	= 0x2,
	CodeExecuteOnly	= 0x8,
	TSS				= 0x9,
};
40 class Descriptor {
41 public:
42 constexpr Descriptor();
43 inline Descriptor(uint32_t first, uint32_t second);
44 constexpr Descriptor(DescriptorType type, bool kernelOnly);
46 protected:
47 union {
48 struct [[gnu::packed]] {
49 uint16_t fLimit0;
50 unsigned fBase0 :24;
51 unsigned fType :4;
52 unsigned fSystem :1;
53 unsigned fDPL :2;
54 unsigned fPresent :1;
55 unsigned fLimit1 :4;
56 unsigned fUnused :1;
57 unsigned fLong :1;
58 unsigned fDB :1;
59 unsigned fGranularity :1;
60 uint8_t fBase1;
63 uint32_t fDescriptor[2];
67 class TSSDescriptor : public Descriptor {
68 public:
69 inline TSSDescriptor(uintptr_t base, size_t limit);
71 const Descriptor& GetLower() const { return *this; }
72 const Descriptor& GetUpper() const { return fSecond; }
74 static void LoadTSS(unsigned index);
76 private:
77 Descriptor fSecond;
80 class GlobalDescriptorTable {
81 public:
82 constexpr GlobalDescriptorTable();
84 inline void Load() const;
86 unsigned SetTSS(unsigned cpu,
87 const TSSDescriptor& tss);
88 private:
89 static constexpr unsigned kFirstTSS = 5;
90 static constexpr unsigned kDescriptorCount
91 = kFirstTSS + SMP_MAX_CPUS * 2;
93 alignas(uint64_t) Descriptor fTable[kDescriptorCount];
// IDT gate types: 14 = 64-bit interrupt gate (clears IF on entry),
// 15 = 64-bit trap gate.
enum class InterruptDescriptorType : unsigned {
	Interrupt = 14,
	Trap,
};
// One 16-byte IDT entry. The constructor taking (isr, ist, kernelOnly)
// fills every field; the single-argument form delegates with ist = 0 and
// kernel-only access.
class [[gnu::packed]] InterruptDescriptor {
public:
	constexpr					InterruptDescriptor(uintptr_t isr,
									unsigned ist, bool kernelOnly);
	constexpr					InterruptDescriptor(uintptr_t isr);

	// Produces the descriptor for the given vector; used by
	// GenerateTable to build the whole IDT at once.
	static constexpr InterruptDescriptor	Generate(unsigned index);

private:
	uint16_t	fBase0;				// ISR address bits 0-15
	uint16_t	fSelector;			// code segment selector
	unsigned	fIST		:3;		// interrupt stack table index (0 = none)
	unsigned	fReserved0	:5;
	unsigned	fType		:4;		// InterruptDescriptorType value
	unsigned	fReserved1	:1;
	unsigned	fDPL		:2;		// privilege required to invoke via int
	unsigned	fPresent	:1;
	uint16_t	fBase1;				// ISR address bits 16-31
	uint32_t	fBase2;				// ISR address bits 32-63
	uint32_t	fReserved2;
};
123 class InterruptDescriptorTable {
124 public:
125 inline void Load() const;
127 static constexpr unsigned kDescriptorCount = 256;
129 private:
130 typedef GenerateTable<InterruptDescriptor, InterruptDescriptor::Generate,
131 kDescriptorCount> TableType;
132 alignas(uint64_t) TableType fTable;
// Placeholder type matching the size/alignment of one ISR stub, so that
// isr_array below can be declared and indexed from C++ (the stubs
// themselves are presumably defined in assembly — not visible here).
class InterruptServiceRoutine {
	alignas(16) uint8_t fDummy[16];
};
139 extern const InterruptServiceRoutine
140 isr_array[InterruptDescriptorTable::kDescriptorCount];
142 static GlobalDescriptorTable sGDT;
143 static InterruptDescriptorTable sIDT;
145 typedef void interrupt_handler_function(iframe* frame);
146 interrupt_handler_function*
147 gInterruptHandlerTable[InterruptDescriptorTable::kDescriptorCount];
150 constexpr bool
151 is_code_segment(DescriptorType type)
153 return type == DescriptorType::CodeExecuteOnly;
157 constexpr
158 Descriptor::Descriptor()
160 fDescriptor { 0, 0 }
162 static_assert(sizeof(Descriptor) == sizeof(uint64_t),
163 "Invalid Descriptor size.");
167 Descriptor::Descriptor(uint32_t first, uint32_t second)
169 fDescriptor { first, second }
174 constexpr
175 Descriptor::Descriptor(DescriptorType type, bool kernelOnly)
177 fLimit0(-1),
178 fBase0(0),
179 fType(static_cast<unsigned>(type)),
180 fSystem(1),
181 fDPL(kernelOnly ? 0 : 3),
182 fPresent(1),
183 fLimit1(0xf),
184 fUnused(0),
185 fLong(is_code_segment(type) ? 1 : 0),
186 fDB(is_code_segment(type) ? 0 : 1),
187 fGranularity(1),
188 fBase1(0)
193 TSSDescriptor::TSSDescriptor(uintptr_t base, size_t limit)
195 fSecond(base >> 32, 0)
197 fLimit0 = static_cast<uint16_t>(limit);
198 fBase0 = base & 0xffffff;
199 fType = static_cast<unsigned>(DescriptorType::TSS);
200 fPresent = 1;
201 fLimit1 = (limit >> 16) & 0xf;
202 fBase1 = static_cast<uint8_t>(base >> 24);
206 void
207 TSSDescriptor::LoadTSS(unsigned index)
209 asm volatile("ltr %w0" : : "r" (index << 3));
213 constexpr
214 GlobalDescriptorTable::GlobalDescriptorTable()
216 fTable {
217 Descriptor(),
218 Descriptor(DescriptorType::CodeExecuteOnly, true),
219 Descriptor(DescriptorType::DataWritable, true),
220 Descriptor(DescriptorType::DataWritable, false),
221 Descriptor(DescriptorType::CodeExecuteOnly, false),
224 static_assert(kDescriptorCount <= 8192,
225 "GDT cannot contain more than 8192 descriptors");
229 void
230 GlobalDescriptorTable::Load() const
232 struct [[gnu::packed]] {
233 uint16_t fLimit;
234 const void* fAddress;
235 } gdtDescriptor = {
236 sizeof(fTable) - 1,
237 static_cast<const void*>(fTable),
240 asm volatile("lgdt %0" : : "m" (gdtDescriptor));
244 unsigned
245 GlobalDescriptorTable::SetTSS(unsigned cpu, const TSSDescriptor& tss)
247 auto index = kFirstTSS + cpu * 2;
248 ASSERT(index + 1 < kDescriptorCount);
249 fTable[index] = tss.GetLower();
250 fTable[index + 1] = tss.GetUpper();
251 return index;
255 constexpr
256 InterruptDescriptor::InterruptDescriptor(uintptr_t isr, unsigned ist,
257 bool kernelOnly)
259 fBase0(isr),
260 fSelector(KERNEL_CODE_SELECTOR),
261 fIST(ist),
262 fReserved0(0),
263 fType(static_cast<unsigned>(InterruptDescriptorType::Interrupt)),
264 fReserved1(0),
265 fDPL(kernelOnly ? 0 : 3),
266 fPresent(1),
267 fBase1(isr >> 16),
268 fBase2(isr >> 32),
269 fReserved2(0)
271 static_assert(sizeof(InterruptDescriptor) == sizeof(uint64_t) * 2,
272 "Invalid InterruptDescriptor size.");
276 constexpr
277 InterruptDescriptor::InterruptDescriptor(uintptr_t isr)
279 InterruptDescriptor(isr, 0, true)
284 void
285 InterruptDescriptorTable::Load() const
287 struct [[gnu::packed]] {
288 uint16_t fLimit;
289 const void* fAddress;
290 } gdtDescriptor = {
291 sizeof(fTable) - 1,
292 static_cast<const void*>(fTable.fTable),
295 asm volatile("lidt %0" : : "m" (gdtDescriptor));
299 constexpr InterruptDescriptor
300 InterruptDescriptor::Generate(unsigned index)
302 return index == 3
303 ? InterruptDescriptor(uintptr_t(isr_array + index), 0, false)
304 : (index == 8
305 ? InterruptDescriptor(uintptr_t(isr_array + index), 1, true)
306 : InterruptDescriptor(uintptr_t(isr_array + index)));
310 // #pragma mark - Exception handlers
313 static void
314 x86_64_general_protection_fault(iframe* frame)
316 if (debug_debugger_running()) {
317 // Handle GPFs if there is a debugger fault handler installed, for
318 // non-canonical address accesses.
319 cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
320 if (cpu->fault_handler != 0) {
321 debug_set_page_fault_info(0, frame->ip, DEBUG_PAGE_FAULT_NO_INFO);
322 frame->ip = cpu->fault_handler;
323 frame->bp = cpu->fault_handler_stack_pointer;
324 return;
328 x86_unexpected_exception(frame);
332 static void
333 x86_64_stack_fault_exception(iframe* frame)
335 // Non-canonical address accesses which reference the stack cause a stack
336 // fault exception instead of GPF. However, we can treat it like a GPF.
337 x86_64_general_protection_fault(frame);
// #pragma mark -
344 void
345 x86_descriptors_preboot_init_percpu(kernel_args* args, int cpu)
347 new(&sGDT) GlobalDescriptorTable;
348 sGDT.Load();
350 memset(&gCPU[cpu].arch.tss, 0, sizeof(struct tss));
351 gCPU[cpu].arch.tss.io_map_base = sizeof(struct tss);
353 // Set up the double fault IST entry (see x86_descriptors_init()).
354 struct tss* tss = &gCPU[cpu].arch.tss;
355 size_t stackSize;
356 tss->ist1 = (addr_t)x86_get_double_fault_stack(cpu, &stackSize);
357 tss->ist1 += stackSize;
359 // Set up the descriptor for this TSS.
360 auto tssIndex = sGDT.SetTSS(cpu,
361 TSSDescriptor(uintptr_t(&gCPU[cpu].arch.tss), sizeof(struct tss)));
362 TSSDescriptor::LoadTSS(tssIndex);
364 new(&sIDT) InterruptDescriptorTable;
365 sIDT.Load();
369 void
370 x86_descriptors_init(kernel_args* args)
372 // Initialize the interrupt handler table.
373 interrupt_handler_function** table = gInterruptHandlerTable;
374 for (uint32 i = 0; i < ARCH_INTERRUPT_BASE; i++)
375 table[i] = x86_invalid_exception;
376 for (uint32 i = ARCH_INTERRUPT_BASE;
377 i < InterruptDescriptorTable::kDescriptorCount; i++) {
378 table[i] = x86_hardware_interrupt;
381 table[0] = x86_unexpected_exception; // Divide Error Exception (#DE)
382 table[1] = x86_handle_debug_exception; // Debug Exception (#DB)
383 table[2] = x86_fatal_exception; // NMI Interrupt
384 table[3] = x86_handle_breakpoint_exception; // Breakpoint Exception (#BP)
385 table[4] = x86_unexpected_exception; // Overflow Exception (#OF)
386 table[5] = x86_unexpected_exception; // BOUND Range Exceeded Exception (#BR)
387 table[6] = x86_unexpected_exception; // Invalid Opcode Exception (#UD)
388 table[7] = x86_fatal_exception; // Device Not Available Exception (#NM)
389 table[8] = x86_fatal_exception; // Double Fault Exception (#DF)
390 table[9] = x86_fatal_exception; // Coprocessor Segment Overrun
391 table[10] = x86_fatal_exception; // Invalid TSS Exception (#TS)
392 table[11] = x86_fatal_exception; // Segment Not Present (#NP)
393 table[12] = x86_64_stack_fault_exception; // Stack Fault Exception (#SS)
394 table[13] = x86_64_general_protection_fault; // General Protection Exception (#GP)
395 table[14] = x86_page_fault_exception; // Page-Fault Exception (#PF)
396 table[16] = x86_unexpected_exception; // x87 FPU Floating-Point Error (#MF)
397 table[17] = x86_unexpected_exception; // Alignment Check Exception (#AC)
398 table[18] = x86_fatal_exception; // Machine-Check Exception (#MC)
399 table[19] = x86_unexpected_exception; // SIMD Floating-Point Exception (#XF)