qemu/softmmu_header.h (kvm-userspace.git)
/*
 *  Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif
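/*
 * How this template is used (illustrative sketch, not text from this file):
 * the including header defines ACCESS_TYPE, MEMSUFFIX and DATA_SIZE before
 * each inclusion and repeats the include once per access width, roughly:
 *
 *     #define ACCESS_TYPE 0
 *     #define MEMSUFFIX MMU_MODE0_SUFFIX
 *     #define DATA_SIZE 1
 *     #include "softmmu_header.h"
 *     #define DATA_SIZE 2
 *     #include "softmmu_header.h"
 *
 * The macro names above follow the cpu-all.h convention of this QEMU
 * generation; treat the sketch as an assumption, not a quotation.
 */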
#if ACCESS_TYPE < (NB_MMU_MODES)

#define CPU_MMU_INDEX ACCESS_TYPE
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == (NB_MMU_MODES)

#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _mmu

#elif ACCESS_TYPE == (NB_MMU_MODES + 1)

#define CPU_MMU_INDEX (cpu_mmu_index(env))
#define MMUSUFFIX _cmmu

#else
#error invalid ACCESS_TYPE
#endif
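/*
 * Summary of the dispatch above: an ACCESS_TYPE below NB_MMU_MODES selects
 * that MMU mode directly at compile time, ACCESS_TYPE == NB_MMU_MODES uses
 * the CPU's current mode via cpu_mmu_index(env), and NB_MMU_MODES + 1 is
 * the code-fetch variant, which routes through the _cmmu helpers.
 */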
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE int
#endif
#if ACCESS_TYPE == (NB_MMU_MODES + 1)
#define ADDR_READ addr_code
#else
#define ADDR_READ addr_read
#endif
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx);
void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int mmu_idx);
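/*
 * The glue(__ld, SUFFIX) and glue(__st, SUFFIX) helpers declared above are
 * the out-of-line slow path taken on a TLB miss; in QEMU they are normally
 * instantiated from softmmu_template.h (background note; that file is not
 * part of this header).
 */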
#if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
    (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)
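/*
 * i386 host fast path: the asm below derives the TLB index from the guest
 * address (shift by TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, mask with
 * (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS), compares the cached tag with
 * the page-aligned address, and on a hit adds the entry's addend to form a
 * host pointer for the direct access; on a miss it calls the slow-path
 * helper.  %ebp is expected to hold the env (CPUState) pointer here.
 */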
static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res;

    asm volatile ("movl %1, %%edx\n"
                  "movl %1, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %1, %%eax\n"
                  "je 1f\n"
                  "movl %6, %%edx\n"
                  "call %7\n"
                  "movl %%eax, %0\n"
                  "jmp 2f\n"
                  "1:\n"
                  "addl 12(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movzbl (%%eax), %0\n"
#elif DATA_SIZE == 2
                  "movzwl (%%eax), %0\n"
#elif DATA_SIZE == 4
                  "movl (%%eax), %0\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  : "=r" (res)
                  : "r" (ptr),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
                    "i" (CPU_MMU_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
    return res;
}
#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res;

    asm volatile ("movl %1, %%edx\n"
                  "movl %1, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %1, %%eax\n"
                  "je 1f\n"
                  "movl %6, %%edx\n"
                  "call %7\n"
#if DATA_SIZE == 1
                  "movsbl %%al, %0\n"
#elif DATA_SIZE == 2
                  "movswl %%ax, %0\n"
#else
#error unsupported size
#endif
                  "jmp 2f\n"
                  "1:\n"
                  "addl 12(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movsbl (%%eax), %0\n"
#elif DATA_SIZE == 2
                  "movswl (%%eax), %0\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  : "=r" (res)
                  : "r" (ptr),
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
                    "i" (CPU_MMU_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
    return res;
}
#endif
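/*
 * The store fast path below performs the same TLB probe as the loads but
 * compares against addr_write, and it reads the addend at offset 8 from
 * that field where the loads use offset 12 from addr_read; this matches a
 * CPUTLBEntry layout of addr_read, addr_write, addr_code, addend with
 * 32-bit fields, which is an assumption about this tree rather than
 * something spelled out in this header.
 */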
static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
    asm volatile ("movl %0, %%edx\n"
                  "movl %0, %%eax\n"
                  "shrl %3, %%edx\n"
                  "andl %4, %%eax\n"
                  "andl %2, %%edx\n"
                  "leal %5(%%edx, %%ebp), %%edx\n"
                  "cmpl (%%edx), %%eax\n"
                  "movl %0, %%eax\n"
                  "je 1f\n"
#if DATA_SIZE == 1
                  "movzbl %b1, %%edx\n"
#elif DATA_SIZE == 2
                  "movzwl %w1, %%edx\n"
#elif DATA_SIZE == 4
                  "movl %1, %%edx\n"
#else
#error unsupported size
#endif
                  "movl %6, %%ecx\n"
                  "call %7\n"
                  "jmp 2f\n"
                  "1:\n"
                  "addl 8(%%edx), %%eax\n"
#if DATA_SIZE == 1
                  "movb %b1, (%%eax)\n"
#elif DATA_SIZE == 2
                  "movw %w1, (%%eax)\n"
#elif DATA_SIZE == 4
                  "movl %1, (%%eax)\n"
#else
#error unsupported size
#endif
                  "2:\n"
                  :
                  : "r" (ptr),
#if DATA_SIZE == 1
                    "q" (v),
#else
                    "r" (v),
#endif
                    "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
                    "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
                    "i" (CPU_MMU_INDEX),
                    "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
                  : "%eax", "%ecx", "%edx", "memory", "cc");
}
#else

/* generic load/store macros */
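/*
 * Generic C fast path: index the per-mode TLB with the page-number bits of
 * the address, compare the cached tag (which also encodes the alignment
 * bits in DATA_SIZE - 1), and on a hit add the entry's addend to obtain a
 * host pointer for a direct _raw access; on a miss, call the slow-path
 * helper instead.
 */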
static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int page_index;
    RES_TYPE res;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
    }
    return res;
}
#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
{
    int res, page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
    }
    return res;
}
#endif
#if ACCESS_TYPE != (NB_MMU_MODES + 1)

/* generic store macro */
static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE v)
{
    int page_index;
    target_ulong addr;
    unsigned long physaddr;
    int mmu_idx;

    addr = ptr;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = CPU_MMU_INDEX;
    if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
                 (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
    } else {
        physaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
        glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
    }
}

#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
#endif /* !asm */

#if ACCESS_TYPE != (NB_MMU_MODES + 1)
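/*
 * The float accessors below reuse the integer load/store of the same width
 * and reinterpret the bits through a union, so no floating-point conversion
 * takes place.
 */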
#if DATA_SIZE == 8
static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.i = glue(ldq, MEMSUFFIX)(ptr);
    return u.d;
}

static inline void glue(stfq, MEMSUFFIX)(target_ulong ptr, float64 v)
{
    union {
        float64 d;
        uint64_t i;
    } u;
    u.d = v;
    glue(stq, MEMSUFFIX)(ptr, u.i);
}
#endif /* DATA_SIZE == 8 */
#if DATA_SIZE == 4
static inline float32 glue(ldfl, MEMSUFFIX)(target_ulong ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = glue(ldl, MEMSUFFIX)(ptr);
    return u.f;
}

static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    glue(stl, MEMSUFFIX)(ptr, u.i);
}
#endif /* DATA_SIZE == 4 */
#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef CPU_MMU_INDEX
#undef MMUSUFFIX
#undef ADDR_READ