/* target-arm/op_helper.c (qemu-kvm/fedora.git) */
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "helpers.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

/* thread support */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
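/* Table lookup for the NEON VTBL/VTBX family: each byte of 'ireg' indexes
   into the byte table held in the NEON registers starting at 'rn'; indices
   at or beyond 'maxindex' take the corresponding byte of 'def' instead
   (so a caller can get VTBL behaviour by passing def = 0, and VTBX
   behaviour by passing the old destination value). */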
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            /* Select byte (index & 7) of the 64-bit table word, so the
               byte index must be converted to a bit shift.  */
            tmp = (table[index >> 3] >> ((index & 7) * 8)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, it means that the function was called from C code (i.e. not
   from generated code or from helper.c). */
/* XXX: fix it to restore all registers */
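/* Roughly: the softmmu slow path calls tlb_fill() on a TLB miss;
   cpu_arm_handle_mmu_fault() either installs a TLB entry or records an
   exception in env->exception_index, in which case the CPU state is
   re-synchronised from the faulting TB (when retaddr points into
   generated code) and the exception is raised. */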
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret)) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception(env->exception_index);
    }
    env = saved_env;
}
#endif
/* FIXME: Pass an explicit pointer to QF to CPUState, and move saturating
   instructions into helper.c */
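/* The signed overflow test below is the usual two's complement rule:
   overflow occurred iff the operands have the same sign but the result's
   sign differs, i.e. ((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT).
   For example, 0x7fffffff + 1 wraps to 0x80000000 and sets QF. */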
uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
/* Signed saturation.  */
static inline uint32_t do_ssat(int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
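/* For illustration: do_ssat() clamps val to [-(1 << shift), (1 << shift) - 1]
   and do_usat() clamps it to [0, (1 << shift) - 1], setting QF whenever the
   value is adjusted.  E.g. do_ssat(0x1234, 7) returns 0x7f and
   do_ssat(-200, 7) returns 0xffffff80 (-128). */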
/* Signed saturate.  */
uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
{
    return do_ssat(x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat((int16_t)x, shift);
    res |= do_ssat(((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
{
    return do_usat(x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat((int16_t)x, shift);
    res |= do_usat(((int32_t)x) >> 16, shift) << 16;
    return res;
}
void HELPER(wfi)(void)
{
    env->exception_index = EXCP_HLT;
    env->halted = 1;
    cpu_loop_exit();
}

void HELPER(exception)(uint32_t excp)
{
    env->exception_index = excp;
    cpu_loop_exit();
}

uint32_t HELPER(cpsr_read)(void)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}
/* Access to user mode registers from privileged modes.  */
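/* Background: r13/r14 are banked per processor mode (index 0 holds the
   User/System copy), and r8-r12 are additionally banked only for FIQ, so
   when running in FIQ mode the User-mode values of r8-r12 live in
   env->usr_regs[]. */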
uint32_t HELPER(get_user_reg)(uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */
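/* For reference, the flag conventions used below (standard ARM semantics):
   NF and ZF hold the raw result (N is its top bit, Z means ZF == 0);
   for addition, C is the unsigned carry-out (result < a) and
   V = (a ^ b ^ -1) & (a ^ result) has its top bit set on signed overflow;
   for subtraction, C is set when no borrow occurs (a >= b) and
   V = (a ^ b) & (a ^ result).  E.g. 0x80000000 - 1 sets both C and V. */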
uint32_t HELPER (add_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    result = a + b;
    env->NF = env->ZF = result;
    env->CF = result < a;
    env->VF = (a ^ b ^ -1) & (a ^ result);
    return result;
}
uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    if (!env->CF) {
        result = a + b;
        env->CF = result < a;
    } else {
        result = a + b + 1;
        env->CF = result <= a;
    }
    env->VF = (a ^ b ^ -1) & (a ^ result);
    env->NF = env->ZF = result;
    return result;
}

uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    result = a - b;
    env->NF = env->ZF = result;
    env->CF = a >= b;
    env->VF = (a ^ b) & (a ^ result);
    return result;
}

uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    if (!env->CF) {
        result = a - b - 1;
        env->CF = a > b;
    } else {
        result = a - b;
        env->CF = a >= b;
    }
    env->VF = (a ^ b) & (a ^ result);
    env->NF = env->ZF = result;
    return result;
}
/* Similarly for variable shift instructions.  */
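/* Note: register-specified shifts use only the bottom byte of the shift
   register, hence the "& 0xff" below.  Shift amounts of 32 or more yield
   0 for LSL/LSR and replicate the sign bit for ASR; the _cc variants also
   update CF with the last bit shifted out. */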
uint32_t HELPER(shl)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        return 0;
    return x << shift;
}

uint32_t HELPER(shr)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        return 0;
    return (uint32_t)x >> shift;
}

uint32_t HELPER(sar)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        shift = 31;
    return (int32_t)x >> shift;
}

uint32_t HELPER(ror)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift == 0)
        return x;
    return (x >> shift) | (x << (32 - shift));
}
uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
uint64_t HELPER(neon_add_saturate_s64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

uint64_t HELPER(neon_add_saturate_u64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        env->QF = 1;
        res = ~(uint64_t)0;
    }
    return res;
}

uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        env->QF = 1;
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}
/* These need to return a pair of values, so still use T0/T1.  */
/* Transpose.  Argument order is rather strange to avoid special-casing
   the translation code.
   On input T0 = rm, T1 = rd.  On output T0 = rd, T1 = rm.  */
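/* Worked example (illustrative values): entering neon_trn_u8 with
   T0 (rm) = 0x44332211 and T1 (rd) = 0x88776655, the even byte lanes of
   both operands end up in rd and the odd lanes in rm, leaving
   T0 (rd) = 0x33771155 and T1 (rm) = 0x44882266. */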
void HELPER(neon_trn_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}

void HELPER(neon_trn_u16)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 << 16) | (T1 & 0xffff);
    rm = (T1 >> 16) | (T0 & 0xffff0000);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}
/* Worker routines for zip and unzip.  */
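/* Worked example (illustrative values): with T0 = 0x44332211 and
   T1 = 0x88776655, neon_unzip_u8 below de-interleaves the byte lanes,
   leaving T0 = 0x77553311 (even lanes) and T1 = 0x88664422 (odd lanes);
   neon_zip_u8 performs the opposite, interleaving transform. */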
void HELPER(neon_unzip_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}
void HELPER(neon_zip_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    /* Interleave the low byte lanes of T0 and T1: lane 1 of each input
       must land in the top half of rd, so the shifts are 8 and 16 bits
       (mirroring the unzip routine above). */
    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
         | ((T0 << 8) & 0xff0000) | ((T1 << 16) & 0xff000000);
    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
    FORCE_RET();
}
void HELPER(neon_zip_u16)(void)
{
    uint32_t tmp;

    tmp = (T0 & 0xffff) | (T1 << 16);
    T1 = (T1 & 0xffff0000) | (T0 >> 16);
    T0 = tmp;
    FORCE_RET();
}