[tangerine.git] / arch / x86_64-pc / bootstrap / cpu.h
#ifndef CPU_H_
#define CPU_H_

/*
    Copyright (C) 2006 The AROS Development Team. All rights reserved.
    $Id$

    Desc: Nice macros for low-level assembly support in C/C++ languages
*/

/* Segment registers */
#define SEG_SUPER_CS    0x08
#define SEG_SUPER_DS    0x10
#define SEG_USER_CS32   0x18
#define SEG_USER_CS64   0x28
#define SEG_USER_DS     0x20
#define SEG_TSS         0x30
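
/*
 * Note (illustrative, not in the original header): each selector above is a
 * byte offset into the GDT (descriptor index * 8). Before a user-mode
 * selector is loaded, the requested privilege level is normally ORed in,
 * e.g.:
 *
 *   unsigned short user_ds = SEG_USER_DS | 3;   // RPL 3 for ring 3
 */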

/* CR0 bits */
#define _CR0_PE_B       0   /* RW: Protected mode enable */
#define _CR0_MP_B       1   /* RW: Monitor FPU? If 1 then #NM exception may be generated */
#define _CR0_EM_B       2   /* RW: Emulate FPU */
#define _CR0_TS_B       3   /* RW: Task switched */
#define _CR0_ET_B       4   /* RO: Exception type */
#define _CR0_NE_B       5   /* RW: Numeric error */
#define _CR0_WP_B       16  /* RW: Write protect for RO pages in supervisor mode */
#define _CR0_AM_B       18  /* RW: Require data alignment */
#define _CR0_NW_B       29  /* RW: IGNORED: Not writethrough */
#define _CR0_CD_B       30  /* RW: Cache disable */
#define _CR0_PG_B       31  /* RW: Paging enable */

#define _CR0_PE         (1 << _CR0_PE_B)
#define _CR0_MP         (1 << _CR0_MP_B)
#define _CR0_EM         (1 << _CR0_EM_B)
#define _CR0_TS         (1 << _CR0_TS_B)
#define _CR0_ET         (1 << _CR0_ET_B)
#define _CR0_NE         (1 << _CR0_NE_B)
#define _CR0_WP         (1 << _CR0_WP_B)
#define _CR0_AM         (1 << _CR0_AM_B)
#define _CR0_NW         (1 << _CR0_NW_B)
#define _CR0_CD         (1 << _CR0_CD_B)
#define _CR0_PG         (1 << _CR0_PG_B)

/* CR3 bits */
#define _CR3_PWT_B      3   /* RW: Page writethrough */
#define _CR3_PCD_B      4   /* RW: Cache disable */

#define _CR3_PWT        (1 << _CR3_PWT_B)
#define _CR3_PCD        (1 << _CR3_PCD_B)

/* CR4 bits */
#define _CR4_VME_B      0   /* RW: Virtual-8086 enable */
#define _CR4_PVI_B      1   /* RW: Protected mode virtual interrupts */
#define _CR4_TSD_B      2   /* RW: Time stamp disable for usermode */
#define _CR4_DE_B       3   /* RW: Debug extensions */
#define _CR4_PSE_B      4   /* RW: Page size extensions */
#define _CR4_PAE_B      5   /* RW: Physical-address extensions */
#define _CR4_MCE_B      6   /* RW: Machine check enable */
#define _CR4_PGE_B      7   /* RW: Page-global enable */
#define _CR4_PCE_B      8   /* RW: Performance monitoring counter enable */
#define _CR4_OSFXSR_B   9   /* RW: Operating system fxsave/fxrstor support */
#define _CR4_OSXMMEXCPT_B 10 /* RW: Operating system unmasked exception support */

#define _CR4_VME        (1 << _CR4_VME_B)
#define _CR4_PVI        (1 << _CR4_PVI_B)
#define _CR4_TSD        (1 << _CR4_TSD_B)
#define _CR4_DE         (1 << _CR4_DE_B)
#define _CR4_PSE        (1 << _CR4_PSE_B)
#define _CR4_PAE        (1 << _CR4_PAE_B)
#define _CR4_MCE        (1 << _CR4_MCE_B)
#define _CR4_PGE        (1 << _CR4_PGE_B)
#define _CR4_PCE        (1 << _CR4_PCE_B)
#define _CR4_OSFXSR     (1 << _CR4_OSFXSR_B)
#define _CR4_OSXMMEXCPT (1 << _CR4_OSXMMEXCPT_B)

/* EFER */
#define EFER            0xc0000080  /* EFER number for rdmsr/wrmsr */
#define _EFER_SCE_B     0   /* RW: System call extensions */
#define _EFER_LME_B     8   /* RW: Long mode enable */
#define _EFER_LMA_B     10  /* RW: Long mode active */
#define _EFER_NXE_B     11  /* RW: No-execute bit enable */
#define _EFER_FFXSR_B   14  /* RW: Fast fxsave/fxrstor */

#define _EFER_SCE       (1 << _EFER_SCE_B)
#define _EFER_LME       (1 << _EFER_LME_B)
#define _EFER_LMA       (1 << _EFER_LMA_B)
#define _EFER_NXE       (1 << _EFER_NXE_B)
#define _EFER_FFXSR     (1 << _EFER_FFXSR_B)

struct int_gate_64bit {
    unsigned short  offset_low;
    unsigned short  selector;
    unsigned        ist:3, __pad0:5, type:5, dpl:2, p:1;
    unsigned short  offset_mid;
    unsigned int    offset_high;
    unsigned int    __pad1;
} __attribute__((packed));
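
/*
 * Illustrative sketch (not part of the original header): filling one 64-bit
 * interrupt gate. The 5-bit type field covers the 4-bit gate type plus the
 * always-zero S bit, so 0x0e selects a 64-bit interrupt gate.
 */
static inline void set_int_gate(struct int_gate_64bit *gate, void *handler)
{
    unsigned long long addr = (unsigned long long)handler;

    gate->offset_low  = addr & 0xffff;
    gate->offset_mid  = (addr >> 16) & 0xffff;
    gate->offset_high = (addr >> 32) & 0xffffffff;
    gate->selector    = SEG_SUPER_CS; /* kernel code segment */
    gate->ist         = 0;            /* keep the current stack */
    gate->type        = 0x0e;         /* 64-bit interrupt gate */
    gate->dpl         = 0;            /* callable from ring 0 only */
    gate->p           = 1;            /* present */
    gate->__pad0      = 0;
    gate->__pad1      = 0;
}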

struct segment_desc {
    unsigned short  limit_low;
    unsigned short  base_low;
    unsigned        base_mid:8, type:5, dpl:2, p:1;
    unsigned        limit_high:4, avl:1, l:1, d:1, g:1, base_high:8;
} __attribute__((packed));

struct segment_ext {
    unsigned int    base_ext;
    unsigned int    __pad0;
} __attribute__((packed));

struct tss_64bit {
    unsigned int        __pad0;
    unsigned long long  rsp0;
    unsigned long long  rsp1;
    unsigned long long  rsp2;
    unsigned long long  __pad1;
    unsigned long long  ist1;
    unsigned long long  ist2;
    unsigned long long  ist3;
    unsigned long long  ist4;
    unsigned long long  ist5;
    unsigned long long  ist6;
    unsigned long long  ist7;
    unsigned long long  __pad2;
    unsigned short      __pad3;
    unsigned short      iopb;
    unsigned int        bmp[];
} __attribute__((packed));
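
/*
 * Illustrative sketch (not part of the original header): minimal TSS setup.
 * Pointing iopb past the 104-byte TSS limit (no bitmap present) denies all
 * user-mode port I/O.
 */
static inline void tss_init(struct tss_64bit *tss, unsigned long long kstack)
{
    tss->rsp0 = kstack;                    /* stack for ring 3 -> ring 0 switches */
    tss->iopb = sizeof(struct tss_64bit);  /* = 104: no I/O permission bitmap */
}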

struct PML4E {
    unsigned p:1,rw:1,us:1,pwt:1,pcd:1,a:1,__pad0:1,mbz:2,avl:3,base_low:20;
    unsigned base_high:20,avail:11,nx:1;
} __attribute__((packed));

struct PDPE {
    unsigned p:1,rw:1,us:1,pwt:1,pcd:1,a:1,__pad0:1,mbz:2,avl:3,base_low:20;
    unsigned base_high:20,avail:11,nx:1;
} __attribute__((packed));

struct PDE4K {
    unsigned p:1,rw:1,us:1,pwt:1,pcd:1,a:1,__pad0:1,ps:1,_pad1:1,avl:3,base_low:20;
    unsigned base_high:20,avail:11,nx:1;
} __attribute__((packed));

struct PDE2M {
    unsigned p:1,rw:1,us:1,pwt:1,pcd:1,a:1,d:1,ps:1,g:1,avl:3,pat:1,base_low:19;
    unsigned base_high:20,avail:11,nx:1;
} __attribute__((packed));

struct PTE {
    unsigned p:1,rw:1,us:1,pwt:1,pcd:1,a:1,d:1,pat:1,g:1,avl:3,base_low:20;
    unsigned base_high:20,avail:11,nx:1;
} __attribute__((packed));
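
/*
 * Illustrative sketch (not part of the original header): composing a PTE for
 * one 4 KiB page. base_low holds physical address bits 12-31, base_high
 * bits 32-51.
 */
static inline struct PTE make_pte(unsigned long long phys, int writable)
{
    struct PTE pte = { 0 };

    pte.p         = 1;                      /* present */
    pte.rw        = writable ? 1 : 0;       /* writable? */
    pte.base_low  = (phys >> 12) & 0xfffff; /* address bits 12-31 */
    pte.base_high = (phys >> 32) & 0xfffff; /* address bits 32-51 */

    return pte;
}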

#define _ljmp(seg, addr) \
    do { asm volatile("ljmp $" #seg ", $" #addr); } while(0)
#define ljmp(s, a) _ljmp(s, a)

#define _ljmp_arg(seg, addr, arg) \
    do { asm volatile("ljmp $" #seg ", $" #addr ::"D"(arg)); } while(0)
#define ljmp_arg(s, a, p) _ljmp_arg(s, a, p)
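
/*
 * Illustrative use (not in the original header): a bootstrap could hand
 * control to a kernel entry point (hypothetical address) with a boot info
 * pointer as its first argument:
 *
 *   ljmp_arg(SEG_SUPER_CS, 0x00200000, bootinfo);
 */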

#define rdcr(reg) \
    ({ long val; asm volatile("mov %%" #reg ",%0":"=r"(val)); val; })

#define wrcr(reg, val) \
    do { asm volatile("mov %0,%%" #reg::"r"(val)); } while(0)
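
/*
 * Illustrative use (not in the original header): read-modify-write of a
 * control register, e.g. enabling supervisor-mode write protection:
 *
 *   wrcr(cr0, rdcr(cr0) | _CR0_WP);
 */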

#define cpuid(num, eax, ebx, ecx, edx) \
    do { asm volatile("cpuid":"=a"(eax),"=b"(ebx),"=c"(ecx),"=d"(edx):"a"(num)); } while(0)
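
/*
 * Illustrative use (not in the original header): probing the NX feature
 * (CPUID extended function 0x80000001, EDX bit 20) before setting
 * _EFER_NXE:
 *
 *   unsigned int eax, ebx, ecx, edx;
 *   cpuid(0x80000001, eax, ebx, ecx, edx);
 *   int has_nx = (edx >> 20) & 1;
 */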

inline void __attribute__((always_inline)) rdmsr(unsigned int msr_no, unsigned int *ret_lo, unsigned int *ret_hi)
{
    unsigned int ret1,ret2;
    asm volatile("rdmsr":"=a"(ret1),"=d"(ret2):"c"(msr_no));
    *ret_lo=ret1;
    *ret_hi=ret2;
}

inline long long __attribute__((always_inline)) rdmsrq(unsigned int msr_no)
{
    unsigned int ret1,ret2;
    asm volatile("rdmsr":"=a"(ret1),"=d"(ret2):"c"(msr_no));
    return ((long long)ret1 | ((long long)ret2 << 32));
}

inline void __attribute__((always_inline)) wrmsr(unsigned int msr_no, unsigned int val_lo, unsigned int val_hi)
{
    asm volatile("wrmsr"::"a"(val_lo),"d"(val_hi),"c"(msr_no));
}

inline void __attribute__((always_inline)) wrmsrq(unsigned int msr_no, unsigned long long val)
{
    asm volatile("wrmsr"::"a"((unsigned int)(val & 0xffffffff)),"d"((unsigned int)(val >> 32)),"c"(msr_no));
}
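
/*
 * Illustrative use (not in the original header): enabling the no-execute
 * feature with a read-modify-write of EFER:
 *
 *   wrmsrq(EFER, rdmsrq(EFER) | _EFER_NXE);
 */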

#endif /* CPU_H_ */