1 /*
2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
29 #include "config.h"
31 /* Define to dump the ELF file used to communicate with GDB. */
32 #undef DEBUG_JIT
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
36 #define NDEBUG
37 #endif
39 #include "qemu-common.h"
40 #include "cache-utils.h"
41 #include "host-utils.h"
42 #include "qemu-timer.h"
44 /* Note: the long term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
46 instructions */
47 #define NO_CPU_IO_DEFS
48 #include "cpu.h"
50 #include "tcg-op.h"
52 #if TCG_TARGET_REG_BITS == 64
53 # define ELF_CLASS ELFCLASS64
54 #else
55 # define ELF_CLASS ELFCLASS32
56 #endif
57 #ifdef HOST_WORDS_BIGENDIAN
58 # define ELF_DATA ELFDATA2MSB
59 #else
60 # define ELF_DATA ELFDATA2LSB
61 #endif
63 #include "elf.h"
65 #if defined(CONFIG_USE_GUEST_BASE) && !defined(TCG_TARGET_HAS_GUEST_BASE)
66 #error GUEST_BASE not supported on this host.
67 #endif
69 /* Forward declarations for functions declared in tcg-target.c and used here. */
70 static void tcg_target_init(TCGContext *s);
71 static void tcg_target_qemu_prologue(TCGContext *s);
72 static void patch_reloc(uint8_t *code_ptr, int type,
73 tcg_target_long value, tcg_target_long addend);
75 static void tcg_register_jit_int(void *buf, size_t size,
76 void *debug_frame, size_t debug_frame_size)
77 __attribute__((unused));
79 /* Forward declarations for functions declared and used in tcg-target.c. */
80 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
81 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
82 tcg_target_long arg2);
83 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
84 static void tcg_out_movi(TCGContext *s, TCGType type,
85 TCGReg ret, tcg_target_long arg);
86 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
87 const int *const_args);
88 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
89 tcg_target_long arg2);
90 static int tcg_target_const_match(tcg_target_long val,
91 const TCGArgConstraint *arg_ct);
92 static int tcg_target_get_call_iarg_regs_count(int flags);
94 TCGOpDef tcg_op_defs[] = {
95 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
96 #include "tcg-opc.h"
97 #undef DEF
99 const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
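/* As a concrete illustration of the macro above, an entry in tcg-opc.h
   along the lines of

       DEF(add_i32, 1, 2, 0, 0)

   expands to the initializer

       { "add_i32", 1, 2, 0, 3, 0 },

   i.e. name, nb_oargs, nb_iargs, nb_cargs, total nb_args and flags.
   The per-argument constraints (args_ct, sorted_args) are filled in
   later by tcg_context_init() and tcg_add_target_add_op_defs(). */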
101 static TCGRegSet tcg_target_available_regs[2];
102 static TCGRegSet tcg_target_call_clobber_regs;
104 /* XXX: move that inside the context */
105 uint16_t *gen_opc_ptr;
106 TCGArg *gen_opparam_ptr;
108 static inline void tcg_out8(TCGContext *s, uint8_t v)
110 *s->code_ptr++ = v;
113 static inline void tcg_out16(TCGContext *s, uint16_t v)
115 *(uint16_t *)s->code_ptr = v;
116 s->code_ptr += 2;
119 static inline void tcg_out32(TCGContext *s, uint32_t v)
121 *(uint32_t *)s->code_ptr = v;
122 s->code_ptr += 4;
125 /* label relocation processing */
127 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
128 int label_index, long addend)
130 TCGLabel *l;
131 TCGRelocation *r;
133 l = &s->labels[label_index];
134 if (l->has_value) {
135 /* FIXME: This may break relocations on RISC targets that
136 modify instruction fields in place. The caller may not have
137 written the initial value. */
138 patch_reloc(code_ptr, type, l->u.value, addend);
139 } else {
140 /* add a new relocation entry */
141 r = tcg_malloc(sizeof(TCGRelocation));
142 r->type = type;
143 r->ptr = code_ptr;
144 r->addend = addend;
145 r->next = l->u.first_reloc;
146 l->u.first_reloc = r;
150 static void tcg_out_label(TCGContext *s, int label_index, void *ptr)
152 TCGLabel *l;
153 TCGRelocation *r;
154 tcg_target_long value = (tcg_target_long)ptr;
156 l = &s->labels[label_index];
157 if (l->has_value)
158 tcg_abort();
159 r = l->u.first_reloc;
160 while (r != NULL) {
161 patch_reloc(r->ptr, r->type, value, r->addend);
162 r = r->next;
164 l->has_value = 1;
165 l->u.value = value;
168 int gen_new_label(void)
170 TCGContext *s = &tcg_ctx;
171 int idx;
172 TCGLabel *l;
174 if (s->nb_labels >= TCG_MAX_LABELS)
175 tcg_abort();
176 idx = s->nb_labels++;
177 l = &s->labels[idx];
178 l->has_value = 0;
179 l->u.first_reloc = NULL;
180 return idx;
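/* Typical use, seen from the front end (with the usual tcg-op.h
   helpers):

       int done = gen_new_label();
       tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, done);
       ...code for the fall-through path...
       gen_set_label(done);

   For such a forward branch, code generation emits the brcond first:
   the label has no value yet, so the backend queues a TCGRelocation via
   tcg_out_reloc().  When INDEX_op_set_label is reached, tcg_out_label()
   records the final address and patch_reloc() fixes up every queued
   site.  Backward branches are patched immediately in tcg_out_reloc(). */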
183 #include "tcg-target.c"
185 /* pool based memory allocation */
186 void *tcg_malloc_internal(TCGContext *s, int size)
188 TCGPool *p;
189 int pool_size;
191 if (size > TCG_POOL_CHUNK_SIZE) {
192 /* big malloc: insert a new pool (XXX: could optimize) */
193 p = g_malloc(sizeof(TCGPool) + size);
194 p->size = size;
195 p->next = s->pool_first_large;
196 s->pool_first_large = p;
197 return p->data;
198 } else {
199 p = s->pool_current;
200 if (!p) {
201 p = s->pool_first;
202 if (!p)
203 goto new_pool;
204 } else {
205 if (!p->next) {
206 new_pool:
207 pool_size = TCG_POOL_CHUNK_SIZE;
208 p = g_malloc(sizeof(TCGPool) + pool_size);
209 p->size = pool_size;
210 p->next = NULL;
211 if (s->pool_current)
212 s->pool_current->next = p;
213 else
214 s->pool_first = p;
215 } else {
216 p = p->next;
220 s->pool_current = p;
221 s->pool_cur = p->data + size;
222 s->pool_end = p->data + p->size;
223 return p->data;
226 void tcg_pool_reset(TCGContext *s)
228 TCGPool *p, *t;
229 for (p = s->pool_first_large; p; p = t) {
230 t = p->next;
231 g_free(p);
233 s->pool_first_large = NULL;
234 s->pool_cur = s->pool_end = NULL;
235 s->pool_current = NULL;
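/* The pool is a simple per-translation bump allocator: tcg_malloc()
   (inline in tcg.h) just advances pool_cur and only falls back to
   tcg_malloc_internal() when the current chunk is exhausted.  There is
   no per-allocation free; everything is reclaimed at once, e.g.

       tcg_func_start(s);                       calls tcg_pool_reset()
       l = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
       r = tcg_malloc(sizeof(TCGRelocation));
       ...both allocations live until the next tcg_pool_reset()...

   Note that the reset only g_free()s the oversized "large" chunks; the
   regular TCG_POOL_CHUNK_SIZE chunks are kept and reused. */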
238 void tcg_context_init(TCGContext *s)
240 int op, total_args, n;
241 TCGOpDef *def;
242 TCGArgConstraint *args_ct;
243 int *sorted_args;
245 memset(s, 0, sizeof(*s));
246 s->temps = s->static_temps;
247 s->nb_globals = 0;
249 /* Count total number of arguments and allocate the corresponding
250 space */
251 total_args = 0;
252 for(op = 0; op < NB_OPS; op++) {
253 def = &tcg_op_defs[op];
254 n = def->nb_iargs + def->nb_oargs;
255 total_args += n;
258 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
259 sorted_args = g_malloc(sizeof(int) * total_args);
261 for(op = 0; op < NB_OPS; op++) {
262 def = &tcg_op_defs[op];
263 def->args_ct = args_ct;
264 def->sorted_args = sorted_args;
265 n = def->nb_iargs + def->nb_oargs;
266 sorted_args += n;
267 args_ct += n;
270 tcg_target_init(s);
273 void tcg_prologue_init(TCGContext *s)
275 /* init global prologue and epilogue */
276 s->code_buf = code_gen_prologue;
277 s->code_ptr = s->code_buf;
278 tcg_target_qemu_prologue(s);
279 flush_icache_range((tcg_target_ulong)s->code_buf,
280 (tcg_target_ulong)s->code_ptr);
283 void tcg_set_frame(TCGContext *s, int reg,
284 tcg_target_long start, tcg_target_long size)
286 s->frame_start = start;
287 s->frame_end = start + size;
288 s->frame_reg = reg;
291 void tcg_func_start(TCGContext *s)
293 int i;
294 tcg_pool_reset(s);
295 s->nb_temps = s->nb_globals;
296 for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
297 s->first_free_temp[i] = -1;
298 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
299 s->nb_labels = 0;
300 s->current_frame_offset = s->frame_start;
302 gen_opc_ptr = gen_opc_buf;
303 gen_opparam_ptr = gen_opparam_buf;
306 static inline void tcg_temp_alloc(TCGContext *s, int n)
308 if (n > TCG_MAX_TEMPS)
309 tcg_abort();
312 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
313 const char *name)
315 TCGContext *s = &tcg_ctx;
316 TCGTemp *ts;
317 int idx;
319 #if TCG_TARGET_REG_BITS == 32
320 if (type != TCG_TYPE_I32)
321 tcg_abort();
322 #endif
323 if (tcg_regset_test_reg(s->reserved_regs, reg))
324 tcg_abort();
325 idx = s->nb_globals;
326 tcg_temp_alloc(s, s->nb_globals + 1);
327 ts = &s->temps[s->nb_globals];
328 ts->base_type = type;
329 ts->type = type;
330 ts->fixed_reg = 1;
331 ts->reg = reg;
332 ts->name = name;
333 s->nb_globals++;
334 tcg_regset_set_reg(s->reserved_regs, reg);
335 return idx;
338 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
340 int idx;
342 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
343 return MAKE_TCGV_I32(idx);
346 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
348 int idx;
350 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
351 return MAKE_TCGV_I64(idx);
354 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
355 tcg_target_long offset,
356 const char *name)
358 TCGContext *s = &tcg_ctx;
359 TCGTemp *ts;
360 int idx;
362 idx = s->nb_globals;
363 #if TCG_TARGET_REG_BITS == 32
364 if (type == TCG_TYPE_I64) {
365 char buf[64];
366 tcg_temp_alloc(s, s->nb_globals + 2);
367 ts = &s->temps[s->nb_globals];
368 ts->base_type = type;
369 ts->type = TCG_TYPE_I32;
370 ts->fixed_reg = 0;
371 ts->mem_allocated = 1;
372 ts->mem_reg = reg;
373 #ifdef TCG_TARGET_WORDS_BIGENDIAN
374 ts->mem_offset = offset + 4;
375 #else
376 ts->mem_offset = offset;
377 #endif
378 pstrcpy(buf, sizeof(buf), name);
379 pstrcat(buf, sizeof(buf), "_0");
380 ts->name = strdup(buf);
381 ts++;
383 ts->base_type = type;
384 ts->type = TCG_TYPE_I32;
385 ts->fixed_reg = 0;
386 ts->mem_allocated = 1;
387 ts->mem_reg = reg;
388 #ifdef TCG_TARGET_WORDS_BIGENDIAN
389 ts->mem_offset = offset;
390 #else
391 ts->mem_offset = offset + 4;
392 #endif
393 pstrcpy(buf, sizeof(buf), name);
394 pstrcat(buf, sizeof(buf), "_1");
395 ts->name = strdup(buf);
397 s->nb_globals += 2;
398 } else
399 #endif
401 tcg_temp_alloc(s, s->nb_globals + 1);
402 ts = &s->temps[s->nb_globals];
403 ts->base_type = type;
404 ts->type = type;
405 ts->fixed_reg = 0;
406 ts->mem_allocated = 1;
407 ts->mem_reg = reg;
408 ts->mem_offset = offset;
409 ts->name = name;
410 s->nb_globals++;
412 return idx;
415 TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset,
416 const char *name)
418 int idx;
420 idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
421 return MAKE_TCGV_I32(idx);
424 TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset,
425 const char *name)
427 int idx;
429 idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
430 return MAKE_TCGV_I64(idx);
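/* Worked example for the 32-bit host path above: on a big-endian host,

       tcg_global_mem_new_i64(TCG_AREG0, 16, "foo")

   (register and offset purely illustrative) creates two TCG_TYPE_I32
   halves: "foo_0", the low word, at offset 16 + 4, and "foo_1", the
   high word, at offset 16.  The returned value indexes the first of
   the pair; on a little-endian host the two offsets are swapped. */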
433 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
435 TCGContext *s = &tcg_ctx;
436 TCGTemp *ts;
437 int idx, k;
439 k = type;
440 if (temp_local)
441 k += TCG_TYPE_COUNT;
442 idx = s->first_free_temp[k];
443 if (idx != -1) {
444 /* There is already an available temp with the
445 right type */
446 ts = &s->temps[idx];
447 s->first_free_temp[k] = ts->next_free_temp;
448 ts->temp_allocated = 1;
449 assert(ts->temp_local == temp_local);
450 } else {
451 idx = s->nb_temps;
452 #if TCG_TARGET_REG_BITS == 32
453 if (type == TCG_TYPE_I64) {
454 tcg_temp_alloc(s, s->nb_temps + 2);
455 ts = &s->temps[s->nb_temps];
456 ts->base_type = type;
457 ts->type = TCG_TYPE_I32;
458 ts->temp_allocated = 1;
459 ts->temp_local = temp_local;
460 ts->name = NULL;
461 ts++;
462 ts->base_type = TCG_TYPE_I32;
463 ts->type = TCG_TYPE_I32;
464 ts->temp_allocated = 1;
465 ts->temp_local = temp_local;
466 ts->name = NULL;
467 s->nb_temps += 2;
468 } else
469 #endif
471 tcg_temp_alloc(s, s->nb_temps + 1);
472 ts = &s->temps[s->nb_temps];
473 ts->base_type = type;
474 ts->type = type;
475 ts->temp_allocated = 1;
476 ts->temp_local = temp_local;
477 ts->name = NULL;
478 s->nb_temps++;
482 #if defined(CONFIG_DEBUG_TCG)
483 s->temps_in_use++;
484 #endif
485 return idx;
488 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
490 int idx;
492 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
493 return MAKE_TCGV_I32(idx);
496 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
498 int idx;
500 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
501 return MAKE_TCGV_I64(idx);
504 static inline void tcg_temp_free_internal(int idx)
506 TCGContext *s = &tcg_ctx;
507 TCGTemp *ts;
508 int k;
510 #if defined(CONFIG_DEBUG_TCG)
511 s->temps_in_use--;
512 if (s->temps_in_use < 0) {
513 fprintf(stderr, "More temporaries freed than allocated!\n");
515 #endif
517 assert(idx >= s->nb_globals && idx < s->nb_temps);
518 ts = &s->temps[idx];
519 assert(ts->temp_allocated != 0);
520 ts->temp_allocated = 0;
521 k = ts->base_type;
522 if (ts->temp_local)
523 k += TCG_TYPE_COUNT;
524 ts->next_free_temp = s->first_free_temp[k];
525 s->first_free_temp[k] = idx;
528 void tcg_temp_free_i32(TCGv_i32 arg)
530 tcg_temp_free_internal(GET_TCGV_I32(arg));
533 void tcg_temp_free_i64(TCGv_i64 arg)
535 tcg_temp_free_internal(GET_TCGV_I64(arg));
538 TCGv_i32 tcg_const_i32(int32_t val)
540 TCGv_i32 t0;
541 t0 = tcg_temp_new_i32();
542 tcg_gen_movi_i32(t0, val);
543 return t0;
546 TCGv_i64 tcg_const_i64(int64_t val)
548 TCGv_i64 t0;
549 t0 = tcg_temp_new_i64();
550 tcg_gen_movi_i64(t0, val);
551 return t0;
554 TCGv_i32 tcg_const_local_i32(int32_t val)
556 TCGv_i32 t0;
557 t0 = tcg_temp_local_new_i32();
558 tcg_gen_movi_i32(t0, val);
559 return t0;
562 TCGv_i64 tcg_const_local_i64(int64_t val)
564 TCGv_i64 t0;
565 t0 = tcg_temp_local_new_i64();
566 tcg_gen_movi_i64(t0, val);
567 return t0;
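/* Front-end lifecycle sketch: temporaries, and the constants built on
   top of them, are recycled per (type, locality) class through
   first_free_temp[], so freeing just pushes the index back for reuse:

       TCGv_i32 t = tcg_temp_new_i32();
       tcg_gen_movi_i32(t, 0x1234);
       ...use t...
       tcg_temp_free_i32(t);

       TCGv_i32 c = tcg_const_i32(42);
       ...use c...
       tcg_temp_free_i32(c);

   Note that tcg_const_*() values are ordinary temporaries and must be
   freed as well; with CONFIG_DEBUG_TCG, tcg_check_temp_count() flags
   translators that leak them. */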
570 #if defined(CONFIG_DEBUG_TCG)
571 void tcg_clear_temp_count(void)
573 TCGContext *s = &tcg_ctx;
574 s->temps_in_use = 0;
577 int tcg_check_temp_count(void)
579 TCGContext *s = &tcg_ctx;
580 if (s->temps_in_use) {
581 /* Clear the count so that we don't give another
582 * warning immediately next time around.
584 s->temps_in_use = 0;
585 return 1;
587 return 0;
589 #endif
591 void tcg_register_helper(void *func, const char *name)
593 TCGContext *s = &tcg_ctx;
594 int n;
595 if ((s->nb_helpers + 1) > s->allocated_helpers) {
596 n = s->allocated_helpers;
597 if (n == 0) {
598 n = 4;
599 } else {
600 n *= 2;
602 s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
603 s->allocated_helpers = n;
605 s->helpers[s->nb_helpers].func = (tcg_target_ulong)func;
606 s->helpers[s->nb_helpers].name = name;
607 s->nb_helpers++;
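/* The table built here is only consulted by tcg_find_helper(), so that
   tcg_dump_ops() can print a helper's name instead of its raw address.
   A target's translation init code registers each helper it uses, e.g.
   (with a hypothetical helper_foo):

       tcg_register_helper(helper_foo, "foo");
 */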
610 /* Note: we convert the 64 bit args to 32 bit and do some alignment
611 and endian swap. Maybe it would be better to do the alignment
612 and endian swap in tcg_reg_alloc_call(). */
613 void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
614 int sizemask, TCGArg ret, int nargs, TCGArg *args)
616 int i;
617 int real_args;
618 int nb_rets;
619 TCGArg *nparam;
621 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
622 for (i = 0; i < nargs; ++i) {
623 int is_64bit = sizemask & (1 << (i+1)*2);
624 int is_signed = sizemask & (2 << (i+1)*2);
625 if (!is_64bit) {
626 TCGv_i64 temp = tcg_temp_new_i64();
627 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
628 if (is_signed) {
629 tcg_gen_ext32s_i64(temp, orig);
630 } else {
631 tcg_gen_ext32u_i64(temp, orig);
633 args[i] = GET_TCGV_I64(temp);
636 #endif /* TCG_TARGET_EXTEND_ARGS */
638 *gen_opc_ptr++ = INDEX_op_call;
639 nparam = gen_opparam_ptr++;
640 if (ret != TCG_CALL_DUMMY_ARG) {
641 #if TCG_TARGET_REG_BITS < 64
642 if (sizemask & 1) {
643 #ifdef TCG_TARGET_WORDS_BIGENDIAN
644 *gen_opparam_ptr++ = ret + 1;
645 *gen_opparam_ptr++ = ret;
646 #else
647 *gen_opparam_ptr++ = ret;
648 *gen_opparam_ptr++ = ret + 1;
649 #endif
650 nb_rets = 2;
651 } else
652 #endif
654 *gen_opparam_ptr++ = ret;
655 nb_rets = 1;
657 } else {
658 nb_rets = 0;
660 real_args = 0;
661 for (i = 0; i < nargs; i++) {
662 #if TCG_TARGET_REG_BITS < 64
663 int is_64bit = sizemask & (1 << (i+1)*2);
664 if (is_64bit) {
665 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
666 /* some targets want aligned 64 bit args */
667 if (real_args & 1) {
668 *gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
669 real_args++;
671 #endif
672 /* If stack grows up, then we will be placing successive
673 arguments at lower addresses, which means we need to
674 reverse the order compared to how we would normally
675 treat either big or little-endian. For those arguments
676 that will wind up in registers, this still works for
677 HPPA (the only current STACK_GROWSUP target) since the
678 argument registers are *also* allocated in decreasing
679 order. If another such target is added, this logic may
680 have to get more complicated to differentiate between
681 stack arguments and register arguments. */
682 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
683 *gen_opparam_ptr++ = args[i] + 1;
684 *gen_opparam_ptr++ = args[i];
685 #else
686 *gen_opparam_ptr++ = args[i];
687 *gen_opparam_ptr++ = args[i] + 1;
688 #endif
689 real_args += 2;
690 continue;
692 #endif /* TCG_TARGET_REG_BITS < 64 */
694 *gen_opparam_ptr++ = args[i];
695 real_args++;
697 *gen_opparam_ptr++ = GET_TCGV_PTR(func);
699 *gen_opparam_ptr++ = flags;
701 *nparam = (nb_rets << 16) | (real_args + 1);
703 /* total parameters, needed to go backward in the instruction stream */
704 *gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;
706 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
707 for (i = 0; i < nargs; ++i) {
708 int is_64bit = sizemask & (1 << (i+1)*2);
709 if (!is_64bit) {
710 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
711 tcg_temp_free_i64(temp);
714 #endif /* TCG_TARGET_EXTEND_ARGS */
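/* Example of the resulting encoding: a call with one return value and
   two register-sized arguments (nb_rets = 1, real_args = 2) produces
   the opparam words

       (1 << 16) | 3,  ret,  arg0,  arg1,  func,  flags,  7

   The first word packs nb_rets with nb_iargs (the arguments plus the
   function pointer), and the trailing 7 is the total word count, which
   is what lets tcg_liveness_analysis() walk the stream backwards. */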
717 #if TCG_TARGET_REG_BITS == 32
718 void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
719 int c, int right, int arith)
721 if (c == 0) {
722 tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
723 tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
724 } else if (c >= 32) {
725 c -= 32;
726 if (right) {
727 if (arith) {
728 tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
729 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
730 } else {
731 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
732 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
734 } else {
735 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
736 tcg_gen_movi_i32(TCGV_LOW(ret), 0);
738 } else {
739 TCGv_i32 t0, t1;
741 t0 = tcg_temp_new_i32();
742 t1 = tcg_temp_new_i32();
743 if (right) {
744 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
745 if (arith)
746 tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
747 else
748 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
749 tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
750 tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
751 tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
752 } else {
753 tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
754 /* Note: ret can be the same as arg1, so we use t1 */
755 tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
756 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
757 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
758 tcg_gen_mov_i32(TCGV_LOW(ret), t1);
760 tcg_temp_free_i32(t0);
761 tcg_temp_free_i32(t1);
764 #endif
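/* Worked example for the 32-bit host helper above: a logical right
   shift of a 64-bit value by 40 takes the c >= 32 path and reduces to

       low(ret)  = high(arg1) >> 8;
       high(ret) = 0;

   while a shift by 12 takes the general path and stitches the halves:

       low(ret)  = (low(arg1) >> 12) | (high(arg1) << 20);
       high(ret) = high(arg1) >> 12;
 */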
767 static void tcg_reg_alloc_start(TCGContext *s)
769 int i;
770 TCGTemp *ts;
771 for(i = 0; i < s->nb_globals; i++) {
772 ts = &s->temps[i];
773 if (ts->fixed_reg) {
774 ts->val_type = TEMP_VAL_REG;
775 } else {
776 ts->val_type = TEMP_VAL_MEM;
779 for(i = s->nb_globals; i < s->nb_temps; i++) {
780 ts = &s->temps[i];
781 ts->val_type = TEMP_VAL_DEAD;
782 ts->mem_allocated = 0;
783 ts->fixed_reg = 0;
785 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
786 s->reg_to_temp[i] = -1;
790 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
791 int idx)
793 TCGTemp *ts;
795 assert(idx >= 0 && idx < s->nb_temps);
796 ts = &s->temps[idx];
797 assert(ts);
798 if (idx < s->nb_globals) {
799 pstrcpy(buf, buf_size, ts->name);
800 } else {
801 if (ts->temp_local)
802 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
803 else
804 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
806 return buf;
809 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
811 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
814 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
816 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
819 static int helper_cmp(const void *p1, const void *p2)
821 const TCGHelperInfo *th1 = p1;
822 const TCGHelperInfo *th2 = p2;
823 if (th1->func < th2->func)
824 return -1;
825 else if (th1->func == th2->func)
826 return 0;
827 else
828 return 1;
831 /* find helper definition (Note: A hash table would be better) */
832 static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val)
834 int m, m_min, m_max;
835 TCGHelperInfo *th;
836 tcg_target_ulong v;
838 if (unlikely(!s->helpers_sorted)) {
839 qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo),
840 helper_cmp);
841 s->helpers_sorted = 1;
844 /* binary search */
845 m_min = 0;
846 m_max = s->nb_helpers - 1;
847 while (m_min <= m_max) {
848 m = (m_min + m_max) >> 1;
849 th = &s->helpers[m];
850 v = th->func;
851 if (v == val)
852 return th;
853 else if (val < v) {
854 m_max = m - 1;
855 } else {
856 m_min = m + 1;
859 return NULL;
862 static const char * const cond_name[] =
864 [TCG_COND_EQ] = "eq",
865 [TCG_COND_NE] = "ne",
866 [TCG_COND_LT] = "lt",
867 [TCG_COND_GE] = "ge",
868 [TCG_COND_LE] = "le",
869 [TCG_COND_GT] = "gt",
870 [TCG_COND_LTU] = "ltu",
871 [TCG_COND_GEU] = "geu",
872 [TCG_COND_LEU] = "leu",
873 [TCG_COND_GTU] = "gtu"
876 void tcg_dump_ops(TCGContext *s)
878 const uint16_t *opc_ptr;
879 const TCGArg *args;
880 TCGArg arg;
881 TCGOpcode c;
882 int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
883 const TCGOpDef *def;
884 char buf[128];
886 first_insn = 1;
887 opc_ptr = gen_opc_buf;
888 args = gen_opparam_buf;
889 while (opc_ptr < gen_opc_ptr) {
890 c = *opc_ptr++;
891 def = &tcg_op_defs[c];
892 if (c == INDEX_op_debug_insn_start) {
893 uint64_t pc;
894 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
895 pc = ((uint64_t)args[1] << 32) | args[0];
896 #else
897 pc = args[0];
898 #endif
899 if (!first_insn) {
900 qemu_log("\n");
902 qemu_log(" ---- 0x%" PRIx64, pc);
903 first_insn = 0;
904 nb_oargs = def->nb_oargs;
905 nb_iargs = def->nb_iargs;
906 nb_cargs = def->nb_cargs;
907 } else if (c == INDEX_op_call) {
908 TCGArg arg;
910 /* variable number of arguments */
911 arg = *args++;
912 nb_oargs = arg >> 16;
913 nb_iargs = arg & 0xffff;
914 nb_cargs = def->nb_cargs;
916 qemu_log(" %s ", def->name);
918 /* function name */
919 qemu_log("%s",
920 tcg_get_arg_str_idx(s, buf, sizeof(buf),
921 args[nb_oargs + nb_iargs - 1]));
922 /* flags */
923 qemu_log(",$0x%" TCG_PRIlx, args[nb_oargs + nb_iargs]);
924 /* nb out args */
925 qemu_log(",$%d", nb_oargs);
926 for(i = 0; i < nb_oargs; i++) {
927 qemu_log(",");
928 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
929 args[i]));
931 for(i = 0; i < (nb_iargs - 1); i++) {
932 qemu_log(",");
933 if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
934 qemu_log("<dummy>");
935 } else {
936 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
937 args[nb_oargs + i]));
940 } else if (c == INDEX_op_movi_i32
941 #if TCG_TARGET_REG_BITS == 64
942 || c == INDEX_op_movi_i64
943 #endif
945 tcg_target_ulong val;
946 TCGHelperInfo *th;
948 nb_oargs = def->nb_oargs;
949 nb_iargs = def->nb_iargs;
950 nb_cargs = def->nb_cargs;
951 qemu_log(" %s %s,$", def->name,
952 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
953 val = args[1];
954 th = tcg_find_helper(s, val);
955 if (th) {
956 qemu_log("%s", th->name);
957 } else {
958 if (c == INDEX_op_movi_i32) {
959 qemu_log("0x%x", (uint32_t)val);
960 } else {
961 qemu_log("0x%" PRIx64 , (uint64_t)val);
964 } else {
965 qemu_log(" %s ", def->name);
966 if (c == INDEX_op_nopn) {
967 /* variable number of arguments */
968 nb_cargs = *args;
969 nb_oargs = 0;
970 nb_iargs = 0;
971 } else {
972 nb_oargs = def->nb_oargs;
973 nb_iargs = def->nb_iargs;
974 nb_cargs = def->nb_cargs;
977 k = 0;
978 for(i = 0; i < nb_oargs; i++) {
979 if (k != 0) {
980 qemu_log(",");
982 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
983 args[k++]));
985 for(i = 0; i < nb_iargs; i++) {
986 if (k != 0) {
987 qemu_log(",");
989 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
990 args[k++]));
992 switch (c) {
993 case INDEX_op_brcond_i32:
994 #if TCG_TARGET_REG_BITS == 32
995 case INDEX_op_brcond2_i32:
996 #elif TCG_TARGET_REG_BITS == 64
997 case INDEX_op_brcond_i64:
998 #endif
999 case INDEX_op_setcond_i32:
1000 #if TCG_TARGET_REG_BITS == 32
1001 case INDEX_op_setcond2_i32:
1002 #elif TCG_TARGET_REG_BITS == 64
1003 case INDEX_op_setcond_i64:
1004 #endif
1005 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1006 qemu_log(",%s", cond_name[args[k++]]);
1007 } else {
1008 qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1010 i = 1;
1011 break;
1012 default:
1013 i = 0;
1014 break;
1016 for(; i < nb_cargs; i++) {
1017 if (k != 0) {
1018 qemu_log(",");
1020 arg = args[k++];
1021 qemu_log("$0x%" TCG_PRIlx, arg);
1024 qemu_log("\n");
1025 args += nb_iargs + nb_oargs + nb_cargs;
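/* The resulting log output looks roughly like

       ---- 0x40001234
       movi_i32 tmp0,$0x2a
       add_i32 tmp1,tmp1,tmp0
       brcond_i32 tmp1,tmp2,eq,$0x1

   one op per line, outputs first, then inputs, then constant arguments,
   with condition codes and registered helper addresses printed
   symbolically (the addresses above are illustrative). */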
1029 /* we give more priority to constraints with fewer registers */
1030 static int get_constraint_priority(const TCGOpDef *def, int k)
1032 const TCGArgConstraint *arg_ct;
1034 int i, n;
1035 arg_ct = &def->args_ct[k];
1036 if (arg_ct->ct & TCG_CT_ALIAS) {
1037 /* an alias is equivalent to a single register */
1038 n = 1;
1039 } else {
1040 if (!(arg_ct->ct & TCG_CT_REG))
1041 return 0;
1042 n = 0;
1043 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1044 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1045 n++;
1048 return TCG_TARGET_NB_REGS - n + 1;
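/* For example, with 16 target registers an argument tied to one fixed
   register gets priority 16 - 1 + 1 = 16, a plain "r" constraint that
   accepts all 16 gets 16 - 16 + 1 = 1, and an aliased output also
   counts as a single register.  sort_constraints() below then makes
   the register allocator satisfy the most constrained arguments
   first. */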
1051 /* sort from highest priority to lowest */
1052 static void sort_constraints(TCGOpDef *def, int start, int n)
1054 int i, j, p1, p2, tmp;
1056 for(i = 0; i < n; i++)
1057 def->sorted_args[start + i] = start + i;
1058 if (n <= 1)
1059 return;
1060 for(i = 0; i < n - 1; i++) {
1061 for(j = i + 1; j < n; j++) {
1062 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1063 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1064 if (p1 < p2) {
1065 tmp = def->sorted_args[start + i];
1066 def->sorted_args[start + i] = def->sorted_args[start + j];
1067 def->sorted_args[start + j] = tmp;
1073 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1075 TCGOpcode op;
1076 TCGOpDef *def;
1077 const char *ct_str;
1078 int i, nb_args;
1080 for(;;) {
1081 if (tdefs->op == (TCGOpcode)-1)
1082 break;
1083 op = tdefs->op;
1084 assert((unsigned)op < NB_OPS);
1085 def = &tcg_op_defs[op];
1086 #if defined(CONFIG_DEBUG_TCG)
1087 /* Duplicate entry in op definitions? */
1088 assert(!def->used);
1089 def->used = 1;
1090 #endif
1091 nb_args = def->nb_iargs + def->nb_oargs;
1092 for(i = 0; i < nb_args; i++) {
1093 ct_str = tdefs->args_ct_str[i];
1094 /* Incomplete TCGTargetOpDef entry? */
1095 assert(ct_str != NULL);
1096 tcg_regset_clear(def->args_ct[i].u.regs);
1097 def->args_ct[i].ct = 0;
1098 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1099 int oarg;
1100 oarg = ct_str[0] - '0';
1101 assert(oarg < def->nb_oargs);
1102 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1103 /* TCG_CT_ALIAS is for the output arguments. The input
1104 argument is tagged with TCG_CT_IALIAS. */
1105 def->args_ct[i] = def->args_ct[oarg];
1106 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1107 def->args_ct[oarg].alias_index = i;
1108 def->args_ct[i].ct |= TCG_CT_IALIAS;
1109 def->args_ct[i].alias_index = oarg;
1110 } else {
1111 for(;;) {
1112 if (*ct_str == '\0')
1113 break;
1114 switch(*ct_str) {
1115 case 'i':
1116 def->args_ct[i].ct |= TCG_CT_CONST;
1117 ct_str++;
1118 break;
1119 default:
1120 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1121 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1122 ct_str, i, def->name);
1123 exit(1);
1130 /* TCGTargetOpDef entry with too much information? */
1131 assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
1133 /* sort the constraints (XXX: this is just a heuristic) */
1134 sort_constraints(def, 0, def->nb_oargs);
1135 sort_constraints(def, def->nb_oargs, def->nb_iargs);
1137 #if 0
1139 int i;
1141 printf("%s: sorted=", def->name);
1142 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1143 printf(" %d", def->sorted_args[i]);
1144 printf("\n");
1146 #endif
1147 tdefs++;
1150 #if defined(CONFIG_DEBUG_TCG)
1151 i = 0;
1152 for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
1153 const TCGOpDef *def = &tcg_op_defs[op];
1154 if (op < INDEX_op_call
1155 || op == INDEX_op_debug_insn_start
1156 || (def->flags & TCG_OPF_NOT_PRESENT)) {
1157 /* Wrong entry in op definitions? */
1158 if (def->used) {
1159 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1160 i = 1;
1162 } else {
1163 /* Missing entry in op definitions? */
1164 if (!def->used) {
1165 fprintf(stderr, "Missing op definition for %s\n", def->name);
1166 i = 1;
1170 if (i == 1) {
1171 tcg_abort();
1173 #endif
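/* For example, a backend table entry such as

       { INDEX_op_add_i32, { "r", "0", "ri" } },

   declares an output in any register, a first input that must share a
   register with output 0 (the TCG_CT_ALIAS / TCG_CT_IALIAS pairing set
   up above), and a second input that may be a register or an immediate
   ('i' sets TCG_CT_CONST; any other letter is handed to
   target_parse_constraint()). */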
1176 #ifdef USE_LIVENESS_ANALYSIS
1178 /* set a nop for an operation using 'nb_args' */
1179 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
1180 TCGArg *args, int nb_args)
1182 if (nb_args == 0) {
1183 *opc_ptr = INDEX_op_nop;
1184 } else {
1185 *opc_ptr = INDEX_op_nopn;
1186 args[0] = nb_args;
1187 args[nb_args - 1] = nb_args;
1191 /* liveness analysis: end of function: globals are live, temps are
1192 dead. */
1193 /* XXX: at this stage, not used as there would be little gain because
1194 most TBs end with a conditional jump. */
1195 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps)
1197 memset(dead_temps, 0, s->nb_globals);
1198 memset(dead_temps + s->nb_globals, 1, s->nb_temps - s->nb_globals);
1201 /* liveness analysis: end of basic block: globals are live, temps are
1202 dead, local temps are live. */
1203 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps)
1205 int i;
1206 TCGTemp *ts;
1208 memset(dead_temps, 0, s->nb_globals);
1209 ts = &s->temps[s->nb_globals];
1210 for(i = s->nb_globals; i < s->nb_temps; i++) {
1211 if (ts->temp_local)
1212 dead_temps[i] = 0;
1213 else
1214 dead_temps[i] = 1;
1215 ts++;
1219 /* Liveness analysis: update the op_dead_args array to tell whether a
1220 given input argument is dead. Instructions updating dead
1221 temporaries are removed. */
1222 static void tcg_liveness_analysis(TCGContext *s)
1224 int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
1225 TCGOpcode op;
1226 TCGArg *args;
1227 const TCGOpDef *def;
1228 uint8_t *dead_temps;
1229 unsigned int dead_args;
1231 gen_opc_ptr++; /* skip end */
1233 nb_ops = gen_opc_ptr - gen_opc_buf;
1235 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1237 dead_temps = tcg_malloc(s->nb_temps);
1238 memset(dead_temps, 1, s->nb_temps);
1240 args = gen_opparam_ptr;
1241 op_index = nb_ops - 1;
1242 while (op_index >= 0) {
1243 op = gen_opc_buf[op_index];
1244 def = &tcg_op_defs[op];
1245 switch(op) {
1246 case INDEX_op_call:
1248 int call_flags;
1250 nb_args = args[-1];
1251 args -= nb_args;
1252 nb_iargs = args[0] & 0xffff;
1253 nb_oargs = args[0] >> 16;
1254 args++;
1255 call_flags = args[nb_oargs + nb_iargs];
1257 /* pure functions can be removed if their result is not
1258 used */
1259 if (call_flags & TCG_CALL_PURE) {
1260 for(i = 0; i < nb_oargs; i++) {
1261 arg = args[i];
1262 if (!dead_temps[arg])
1263 goto do_not_remove_call;
1265 tcg_set_nop(s, gen_opc_buf + op_index,
1266 args - 1, nb_args);
1267 } else {
1268 do_not_remove_call:
1270 /* output args are dead */
1271 dead_args = 0;
1272 for(i = 0; i < nb_oargs; i++) {
1273 arg = args[i];
1274 if (dead_temps[arg]) {
1275 dead_args |= (1 << i);
1277 dead_temps[arg] = 1;
1280 if (!(call_flags & TCG_CALL_CONST)) {
1281 /* globals are live (they may be used by the call) */
1282 memset(dead_temps, 0, s->nb_globals);
1285 /* input args are live */
1286 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1287 arg = args[i];
1288 if (arg != TCG_CALL_DUMMY_ARG) {
1289 if (dead_temps[arg]) {
1290 dead_args |= (1 << i);
1292 dead_temps[arg] = 0;
1295 s->op_dead_args[op_index] = dead_args;
1297 args--;
1299 break;
1300 case INDEX_op_set_label:
1301 args--;
1302 /* mark end of basic block */
1303 tcg_la_bb_end(s, dead_temps);
1304 break;
1305 case INDEX_op_debug_insn_start:
1306 args -= def->nb_args;
1307 break;
1308 case INDEX_op_nopn:
1309 nb_args = args[-1];
1310 args -= nb_args;
1311 break;
1312 case INDEX_op_discard:
1313 args--;
1314 /* mark the temporary as dead */
1315 dead_temps[args[0]] = 1;
1316 break;
1317 case INDEX_op_end:
1318 break;
1319 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1320 default:
1321 args -= def->nb_args;
1322 nb_iargs = def->nb_iargs;
1323 nb_oargs = def->nb_oargs;
1325 /* Test if the operation can be removed because all
1326 its outputs are dead. We assume that nb_oargs == 0
1327 implies side effects */
1328 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1329 for(i = 0; i < nb_oargs; i++) {
1330 arg = args[i];
1331 if (!dead_temps[arg])
1332 goto do_not_remove;
1334 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args);
1335 #ifdef CONFIG_PROFILER
1336 s->del_op_count++;
1337 #endif
1338 } else {
1339 do_not_remove:
1341 /* output args are dead */
1342 dead_args = 0;
1343 for(i = 0; i < nb_oargs; i++) {
1344 arg = args[i];
1345 if (dead_temps[arg]) {
1346 dead_args |= (1 << i);
1348 dead_temps[arg] = 1;
1351 /* if end of basic block, update */
1352 if (def->flags & TCG_OPF_BB_END) {
1353 tcg_la_bb_end(s, dead_temps);
1354 } else if (def->flags & TCG_OPF_CALL_CLOBBER) {
1355 /* globals are live */
1356 memset(dead_temps, 0, s->nb_globals);
1359 /* input args are live */
1360 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1361 arg = args[i];
1362 if (dead_temps[arg]) {
1363 dead_args |= (1 << i);
1365 dead_temps[arg] = 0;
1367 s->op_dead_args[op_index] = dead_args;
1369 break;
1371 op_index--;
1374 if (args != gen_opparam_buf)
1375 tcg_abort();
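/* The result is one 16-bit mask per op: bit n of op_dead_args[] is set
   when argument n (outputs first, then inputs) holds the last use of
   its temporary.  For instance, in

       add_i32 t2, t0, t1

   where t1 is never read again, bit 2 is set, which later allows
   tcg_reg_alloc_op() to reuse t1's register for the output instead of
   allocating a new one. */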
1377 #else
1378 /* dummy liveness analysis */
1379 static void tcg_liveness_analysis(TCGContext *s)
1381 int nb_ops;
1382 nb_ops = gen_opc_ptr - gen_opc_buf;
1384 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1385 memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1387 #endif
1389 #ifndef NDEBUG
1390 static void dump_regs(TCGContext *s)
1392 TCGTemp *ts;
1393 int i;
1394 char buf[64];
1396 for(i = 0; i < s->nb_temps; i++) {
1397 ts = &s->temps[i];
1398 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1399 switch(ts->val_type) {
1400 case TEMP_VAL_REG:
1401 printf("%s", tcg_target_reg_names[ts->reg]);
1402 break;
1403 case TEMP_VAL_MEM:
1404 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1405 break;
1406 case TEMP_VAL_CONST:
1407 printf("$0x%" TCG_PRIlx, ts->val);
1408 break;
1409 case TEMP_VAL_DEAD:
1410 printf("D");
1411 break;
1412 default:
1413 printf("???");
1414 break;
1416 printf("\n");
1419 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1420 if (s->reg_to_temp[i] >= 0) {
1421 printf("%s: %s\n",
1422 tcg_target_reg_names[i],
1423 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1428 static void check_regs(TCGContext *s)
1430 int reg, k;
1431 TCGTemp *ts;
1432 char buf[64];
1434 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1435 k = s->reg_to_temp[reg];
1436 if (k >= 0) {
1437 ts = &s->temps[k];
1438 if (ts->val_type != TEMP_VAL_REG ||
1439 ts->reg != reg) {
1440 printf("Inconsistency for register %s:\n",
1441 tcg_target_reg_names[reg]);
1442 goto fail;
1446 for(k = 0; k < s->nb_temps; k++) {
1447 ts = &s->temps[k];
1448 if (ts->val_type == TEMP_VAL_REG &&
1449 !ts->fixed_reg &&
1450 s->reg_to_temp[ts->reg] != k) {
1451 printf("Inconsistency for temp %s:\n",
1452 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1453 fail:
1454 printf("reg state:\n");
1455 dump_regs(s);
1456 tcg_abort();
1460 #endif
1462 static void temp_allocate_frame(TCGContext *s, int temp)
1464 TCGTemp *ts;
1465 ts = &s->temps[temp];
1466 #ifndef __sparc_v9__ /* Sparc64 stack is accessed with offset of 2047 */
1467 s->current_frame_offset = (s->current_frame_offset +
1468 (tcg_target_long)sizeof(tcg_target_long) - 1) &
1469 ~(sizeof(tcg_target_long) - 1);
1470 #endif
1471 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1472 s->frame_end) {
1473 tcg_abort();
1475 ts->mem_offset = s->current_frame_offset;
1476 ts->mem_reg = s->frame_reg;
1477 ts->mem_allocated = 1;
1478 s->current_frame_offset += (tcg_target_long)sizeof(tcg_target_long);
1481 /* free register 'reg' by spilling the corresponding temporary if necessary */
1482 static void tcg_reg_free(TCGContext *s, int reg)
1484 TCGTemp *ts;
1485 int temp;
1487 temp = s->reg_to_temp[reg];
1488 if (temp != -1) {
1489 ts = &s->temps[temp];
1490 assert(ts->val_type == TEMP_VAL_REG);
1491 if (!ts->mem_coherent) {
1492 if (!ts->mem_allocated)
1493 temp_allocate_frame(s, temp);
1494 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1496 ts->val_type = TEMP_VAL_MEM;
1497 s->reg_to_temp[reg] = -1;
1501 /* Allocate a register belonging to reg1 & ~reg2 */
1502 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1504 int i, reg;
1505 TCGRegSet reg_ct;
1507 tcg_regset_andnot(reg_ct, reg1, reg2);
1509 /* first try free registers */
1510 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1511 reg = tcg_target_reg_alloc_order[i];
1512 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1513 return reg;
1516 /* XXX: do better spill choice */
1517 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1518 reg = tcg_target_reg_alloc_order[i];
1519 if (tcg_regset_test_reg(reg_ct, reg)) {
1520 tcg_reg_free(s, reg);
1521 return reg;
1525 tcg_abort();
1528 /* save a temporary to memory. 'allocated_regs' is used in case a
1529 temporary register needs to be allocated to store a constant. */
1530 static void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1532 TCGTemp *ts;
1533 int reg;
1535 ts = &s->temps[temp];
1536 if (!ts->fixed_reg) {
1537 switch(ts->val_type) {
1538 case TEMP_VAL_REG:
1539 tcg_reg_free(s, ts->reg);
1540 break;
1541 case TEMP_VAL_DEAD:
1542 ts->val_type = TEMP_VAL_MEM;
1543 break;
1544 case TEMP_VAL_CONST:
1545 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1546 allocated_regs);
1547 if (!ts->mem_allocated)
1548 temp_allocate_frame(s, temp);
1549 tcg_out_movi(s, ts->type, reg, ts->val);
1550 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1551 ts->val_type = TEMP_VAL_MEM;
1552 break;
1553 case TEMP_VAL_MEM:
1554 break;
1555 default:
1556 tcg_abort();
1561 /* save globals to their canonical location and assume they can be
1562 modified by the following code. 'allocated_regs' is used in case a
1563 temporary register needs to be allocated to store a constant. */
1564 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1566 int i;
1568 for(i = 0; i < s->nb_globals; i++) {
1569 temp_save(s, i, allocated_regs);
1573 /* at the end of a basic block, we assume all temporaries are dead and
1574 all globals are stored at their canonical location. */
1575 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1577 TCGTemp *ts;
1578 int i;
1580 for(i = s->nb_globals; i < s->nb_temps; i++) {
1581 ts = &s->temps[i];
1582 if (ts->temp_local) {
1583 temp_save(s, i, allocated_regs);
1584 } else {
1585 if (ts->val_type == TEMP_VAL_REG) {
1586 s->reg_to_temp[ts->reg] = -1;
1588 ts->val_type = TEMP_VAL_DEAD;
1592 save_globals(s, allocated_regs);
1595 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1597 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args)
1599 TCGTemp *ots;
1600 tcg_target_ulong val;
1602 ots = &s->temps[args[0]];
1603 val = args[1];
1605 if (ots->fixed_reg) {
1606 /* for fixed registers, we do not do any constant
1607 propagation */
1608 tcg_out_movi(s, ots->type, ots->reg, val);
1609 } else {
1610 /* The movi is not explicitly generated here */
1611 if (ots->val_type == TEMP_VAL_REG)
1612 s->reg_to_temp[ots->reg] = -1;
1613 ots->val_type = TEMP_VAL_CONST;
1614 ots->val = val;
1618 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1619 const TCGArg *args,
1620 unsigned int dead_args)
1622 TCGTemp *ts, *ots;
1623 int reg;
1624 const TCGArgConstraint *arg_ct;
1626 ots = &s->temps[args[0]];
1627 ts = &s->temps[args[1]];
1628 arg_ct = &def->args_ct[0];
1630 /* XXX: always mark arg dead if IS_DEAD_ARG(1) */
1631 if (ts->val_type == TEMP_VAL_REG) {
1632 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1633 /* the mov can be suppressed */
1634 if (ots->val_type == TEMP_VAL_REG)
1635 s->reg_to_temp[ots->reg] = -1;
1636 reg = ts->reg;
1637 s->reg_to_temp[reg] = -1;
1638 ts->val_type = TEMP_VAL_DEAD;
1639 } else {
1640 if (ots->val_type == TEMP_VAL_REG) {
1641 reg = ots->reg;
1642 } else {
1643 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1645 if (ts->reg != reg) {
1646 tcg_out_mov(s, ots->type, reg, ts->reg);
1649 } else if (ts->val_type == TEMP_VAL_MEM) {
1650 if (ots->val_type == TEMP_VAL_REG) {
1651 reg = ots->reg;
1652 } else {
1653 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1655 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1656 } else if (ts->val_type == TEMP_VAL_CONST) {
1657 if (ots->fixed_reg) {
1658 reg = ots->reg;
1659 tcg_out_movi(s, ots->type, reg, ts->val);
1660 } else {
1661 /* propagate constant */
1662 if (ots->val_type == TEMP_VAL_REG)
1663 s->reg_to_temp[ots->reg] = -1;
1664 ots->val_type = TEMP_VAL_CONST;
1665 ots->val = ts->val;
1666 return;
1668 } else {
1669 tcg_abort();
1671 s->reg_to_temp[reg] = args[0];
1672 ots->reg = reg;
1673 ots->val_type = TEMP_VAL_REG;
1674 ots->mem_coherent = 0;
1677 static void tcg_reg_alloc_op(TCGContext *s,
1678 const TCGOpDef *def, TCGOpcode opc,
1679 const TCGArg *args,
1680 unsigned int dead_args)
1682 TCGRegSet allocated_regs;
1683 int i, k, nb_iargs, nb_oargs, reg;
1684 TCGArg arg;
1685 const TCGArgConstraint *arg_ct;
1686 TCGTemp *ts;
1687 TCGArg new_args[TCG_MAX_OP_ARGS];
1688 int const_args[TCG_MAX_OP_ARGS];
1690 nb_oargs = def->nb_oargs;
1691 nb_iargs = def->nb_iargs;
1693 /* copy constants */
1694 memcpy(new_args + nb_oargs + nb_iargs,
1695 args + nb_oargs + nb_iargs,
1696 sizeof(TCGArg) * def->nb_cargs);
1698 /* satisfy input constraints */
1699 tcg_regset_set(allocated_regs, s->reserved_regs);
1700 for(k = 0; k < nb_iargs; k++) {
1701 i = def->sorted_args[nb_oargs + k];
1702 arg = args[i];
1703 arg_ct = &def->args_ct[i];
1704 ts = &s->temps[arg];
1705 if (ts->val_type == TEMP_VAL_MEM) {
1706 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1707 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1708 ts->val_type = TEMP_VAL_REG;
1709 ts->reg = reg;
1710 ts->mem_coherent = 1;
1711 s->reg_to_temp[reg] = arg;
1712 } else if (ts->val_type == TEMP_VAL_CONST) {
1713 if (tcg_target_const_match(ts->val, arg_ct)) {
1714 /* constant is OK for instruction */
1715 const_args[i] = 1;
1716 new_args[i] = ts->val;
1717 goto iarg_end;
1718 } else {
1719 /* need to move to a register */
1720 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1721 tcg_out_movi(s, ts->type, reg, ts->val);
1722 ts->val_type = TEMP_VAL_REG;
1723 ts->reg = reg;
1724 ts->mem_coherent = 0;
1725 s->reg_to_temp[reg] = arg;
1728 assert(ts->val_type == TEMP_VAL_REG);
1729 if (arg_ct->ct & TCG_CT_IALIAS) {
1730 if (ts->fixed_reg) {
1731 /* if fixed register, we must allocate a new register
1732 if the alias is not the same register */
1733 if (arg != args[arg_ct->alias_index])
1734 goto allocate_in_reg;
1735 } else {
1736 /* if the input is aliased to an output and if it is
1737 not dead after the instruction, we must allocate
1738 a new register and move it */
1739 if (!IS_DEAD_ARG(i)) {
1740 goto allocate_in_reg;
1744 reg = ts->reg;
1745 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1746 /* nothing to do : the constraint is satisfied */
1747 } else {
1748 allocate_in_reg:
1749 /* allocate a new register matching the constraint
1750 and move the temporary register into it */
1751 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1752 tcg_out_mov(s, ts->type, reg, ts->reg);
1754 new_args[i] = reg;
1755 const_args[i] = 0;
1756 tcg_regset_set_reg(allocated_regs, reg);
1757 iarg_end: ;
1760 if (def->flags & TCG_OPF_BB_END) {
1761 tcg_reg_alloc_bb_end(s, allocated_regs);
1762 } else {
1763 /* mark dead temporaries and free the associated registers */
1764 for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1765 arg = args[i];
1766 if (IS_DEAD_ARG(i)) {
1767 ts = &s->temps[arg];
1768 if (!ts->fixed_reg) {
1769 if (ts->val_type == TEMP_VAL_REG)
1770 s->reg_to_temp[ts->reg] = -1;
1771 ts->val_type = TEMP_VAL_DEAD;
1776 if (def->flags & TCG_OPF_CALL_CLOBBER) {
1777 /* XXX: permit generic clobber register list ? */
1778 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1779 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1780 tcg_reg_free(s, reg);
1783 /* XXX: for load/store we could do that only for the slow path
1784 (i.e. when a memory callback is called) */
1786 /* store globals and free associated registers (we assume the insn
1787 can modify any global). */
1788 save_globals(s, allocated_regs);
1791 /* satisfy the output constraints */
1792 tcg_regset_set(allocated_regs, s->reserved_regs);
1793 for(k = 0; k < nb_oargs; k++) {
1794 i = def->sorted_args[k];
1795 arg = args[i];
1796 arg_ct = &def->args_ct[i];
1797 ts = &s->temps[arg];
1798 if (arg_ct->ct & TCG_CT_ALIAS) {
1799 reg = new_args[arg_ct->alias_index];
1800 } else {
1801 /* if fixed register, we try to use it */
1802 reg = ts->reg;
1803 if (ts->fixed_reg &&
1804 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1805 goto oarg_end;
1807 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1809 tcg_regset_set_reg(allocated_regs, reg);
1810 /* if a fixed register is used, then a move will be done afterwards */
1811 if (!ts->fixed_reg) {
1812 if (ts->val_type == TEMP_VAL_REG)
1813 s->reg_to_temp[ts->reg] = -1;
1814 if (IS_DEAD_ARG(i)) {
1815 ts->val_type = TEMP_VAL_DEAD;
1816 } else {
1817 ts->val_type = TEMP_VAL_REG;
1818 ts->reg = reg;
1819 /* temp value is modified, so the value kept in memory is
1820 potentially not the same */
1821 ts->mem_coherent = 0;
1822 s->reg_to_temp[reg] = arg;
1825 oarg_end:
1826 new_args[i] = reg;
1830 /* emit instruction */
1831 tcg_out_op(s, opc, new_args, const_args);
1833 /* move the outputs in the correct register if needed */
1834 for(i = 0; i < nb_oargs; i++) {
1835 ts = &s->temps[args[i]];
1836 reg = new_args[i];
1837 if (ts->fixed_reg && ts->reg != reg) {
1838 tcg_out_mov(s, ts->type, ts->reg, reg);
1843 #ifdef TCG_TARGET_STACK_GROWSUP
1844 #define STACK_DIR(x) (-(x))
1845 #else
1846 #define STACK_DIR(x) (x)
1847 #endif
1849 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
1850 TCGOpcode opc, const TCGArg *args,
1851 unsigned int dead_args)
1853 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
1854 TCGArg arg, func_arg;
1855 TCGTemp *ts;
1856 tcg_target_long stack_offset, call_stack_size, func_addr;
1857 int const_func_arg, allocate_args;
1858 TCGRegSet allocated_regs;
1859 const TCGArgConstraint *arg_ct;
1861 arg = *args++;
1863 nb_oargs = arg >> 16;
1864 nb_iargs = arg & 0xffff;
1865 nb_params = nb_iargs - 1;
1867 flags = args[nb_oargs + nb_iargs];
1869 nb_regs = tcg_target_get_call_iarg_regs_count(flags);
1870 if (nb_regs > nb_params)
1871 nb_regs = nb_params;
1873 /* assign stack slots first */
1874 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
1875 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
1876 ~(TCG_TARGET_STACK_ALIGN - 1);
1877 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
1878 if (allocate_args) {
1879 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
1880 preallocate call stack */
1881 tcg_abort();
1884 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
1885 for(i = nb_regs; i < nb_params; i++) {
1886 arg = args[nb_oargs + i];
1887 #ifdef TCG_TARGET_STACK_GROWSUP
1888 stack_offset -= sizeof(tcg_target_long);
1889 #endif
1890 if (arg != TCG_CALL_DUMMY_ARG) {
1891 ts = &s->temps[arg];
1892 if (ts->val_type == TEMP_VAL_REG) {
1893 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
1894 } else if (ts->val_type == TEMP_VAL_MEM) {
1895 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1896 s->reserved_regs);
1897 /* XXX: not correct if reading values from the stack */
1898 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1899 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1900 } else if (ts->val_type == TEMP_VAL_CONST) {
1901 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1902 s->reserved_regs);
1903 /* XXX: sign extend may be needed on some targets */
1904 tcg_out_movi(s, ts->type, reg, ts->val);
1905 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1906 } else {
1907 tcg_abort();
1910 #ifndef TCG_TARGET_STACK_GROWSUP
1911 stack_offset += sizeof(tcg_target_long);
1912 #endif
1915 /* assign input registers */
1916 tcg_regset_set(allocated_regs, s->reserved_regs);
1917 for(i = 0; i < nb_regs; i++) {
1918 arg = args[nb_oargs + i];
1919 if (arg != TCG_CALL_DUMMY_ARG) {
1920 ts = &s->temps[arg];
1921 reg = tcg_target_call_iarg_regs[i];
1922 tcg_reg_free(s, reg);
1923 if (ts->val_type == TEMP_VAL_REG) {
1924 if (ts->reg != reg) {
1925 tcg_out_mov(s, ts->type, reg, ts->reg);
1927 } else if (ts->val_type == TEMP_VAL_MEM) {
1928 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1929 } else if (ts->val_type == TEMP_VAL_CONST) {
1930 /* XXX: sign extend ? */
1931 tcg_out_movi(s, ts->type, reg, ts->val);
1932 } else {
1933 tcg_abort();
1935 tcg_regset_set_reg(allocated_regs, reg);
1939 /* assign function address */
1940 func_arg = args[nb_oargs + nb_iargs - 1];
1941 arg_ct = &def->args_ct[0];
1942 ts = &s->temps[func_arg];
1943 func_addr = ts->val;
1944 const_func_arg = 0;
1945 if (ts->val_type == TEMP_VAL_MEM) {
1946 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1947 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1948 func_arg = reg;
1949 tcg_regset_set_reg(allocated_regs, reg);
1950 } else if (ts->val_type == TEMP_VAL_REG) {
1951 reg = ts->reg;
1952 if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1953 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1954 tcg_out_mov(s, ts->type, reg, ts->reg);
1956 func_arg = reg;
1957 tcg_regset_set_reg(allocated_regs, reg);
1958 } else if (ts->val_type == TEMP_VAL_CONST) {
1959 if (tcg_target_const_match(func_addr, arg_ct)) {
1960 const_func_arg = 1;
1961 func_arg = func_addr;
1962 } else {
1963 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1964 tcg_out_movi(s, ts->type, reg, func_addr);
1965 func_arg = reg;
1966 tcg_regset_set_reg(allocated_regs, reg);
1968 } else {
1969 tcg_abort();
1973 /* mark dead temporaries and free the associated registers */
1974 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1975 arg = args[i];
1976 if (IS_DEAD_ARG(i)) {
1977 ts = &s->temps[arg];
1978 if (!ts->fixed_reg) {
1979 if (ts->val_type == TEMP_VAL_REG)
1980 s->reg_to_temp[ts->reg] = -1;
1981 ts->val_type = TEMP_VAL_DEAD;
1986 /* clobber call registers */
1987 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1988 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1989 tcg_reg_free(s, reg);
1993 /* store globals and free associated registers (we assume the call
1994 can modify any global). */
1995 if (!(flags & TCG_CALL_CONST)) {
1996 save_globals(s, allocated_regs);
1999 tcg_out_op(s, opc, &func_arg, &const_func_arg);
2001 /* assign output registers and emit moves if needed */
2002 for(i = 0; i < nb_oargs; i++) {
2003 arg = args[i];
2004 ts = &s->temps[arg];
2005 reg = tcg_target_call_oarg_regs[i];
2006 assert(s->reg_to_temp[reg] == -1);
2007 if (ts->fixed_reg) {
2008 if (ts->reg != reg) {
2009 tcg_out_mov(s, ts->type, ts->reg, reg);
2011 } else {
2012 if (ts->val_type == TEMP_VAL_REG)
2013 s->reg_to_temp[ts->reg] = -1;
2014 if (IS_DEAD_ARG(i)) {
2015 ts->val_type = TEMP_VAL_DEAD;
2016 } else {
2017 ts->val_type = TEMP_VAL_REG;
2018 ts->reg = reg;
2019 ts->mem_coherent = 0;
2020 s->reg_to_temp[reg] = arg;
2025 return nb_iargs + nb_oargs + def->nb_cargs + 1;
2028 #ifdef CONFIG_PROFILER
2030 static int64_t tcg_table_op_count[NB_OPS];
2032 static void dump_op_count(void)
2034 int i;
2035 FILE *f;
2036 f = fopen("/tmp/op.log", "w");
2037 for(i = INDEX_op_end; i < NB_OPS; i++) {
2038 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]);
2040 fclose(f);
2042 #endif
2045 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
2046 long search_pc)
2048 TCGOpcode opc;
2049 int op_index;
2050 const TCGOpDef *def;
2051 unsigned int dead_args;
2052 const TCGArg *args;
2054 #ifdef DEBUG_DISAS
2055 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2056 qemu_log("OP:\n");
2057 tcg_dump_ops(s);
2058 qemu_log("\n");
2060 #endif
2062 #ifdef USE_TCG_OPTIMIZATIONS
2063 gen_opparam_ptr =
2064 tcg_optimize(s, gen_opc_ptr, gen_opparam_buf, tcg_op_defs);
2065 #endif
2067 #ifdef CONFIG_PROFILER
2068 s->la_time -= profile_getclock();
2069 #endif
2070 tcg_liveness_analysis(s);
2071 #ifdef CONFIG_PROFILER
2072 s->la_time += profile_getclock();
2073 #endif
2075 #ifdef DEBUG_DISAS
2076 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2077 qemu_log("OP after liveness analysis:\n");
2078 tcg_dump_ops(s);
2079 qemu_log("\n");
2081 #endif
2083 tcg_reg_alloc_start(s);
2085 s->code_buf = gen_code_buf;
2086 s->code_ptr = gen_code_buf;
2088 args = gen_opparam_buf;
2089 op_index = 0;
2091 for(;;) {
2092 opc = gen_opc_buf[op_index];
2093 #ifdef CONFIG_PROFILER
2094 tcg_table_op_count[opc]++;
2095 #endif
2096 def = &tcg_op_defs[opc];
2097 #if 0
2098 printf("%s: %d %d %d\n", def->name,
2099 def->nb_oargs, def->nb_iargs, def->nb_cargs);
2100 // dump_regs(s);
2101 #endif
2102 switch(opc) {
2103 case INDEX_op_mov_i32:
2104 #if TCG_TARGET_REG_BITS == 64
2105 case INDEX_op_mov_i64:
2106 #endif
2107 dead_args = s->op_dead_args[op_index];
2108 tcg_reg_alloc_mov(s, def, args, dead_args);
2109 break;
2110 case INDEX_op_movi_i32:
2111 #if TCG_TARGET_REG_BITS == 64
2112 case INDEX_op_movi_i64:
2113 #endif
2114 tcg_reg_alloc_movi(s, args);
2115 break;
2116 case INDEX_op_debug_insn_start:
2117 /* debug instruction */
2118 break;
2119 case INDEX_op_nop:
2120 case INDEX_op_nop1:
2121 case INDEX_op_nop2:
2122 case INDEX_op_nop3:
2123 break;
2124 case INDEX_op_nopn:
2125 args += args[0];
2126 goto next;
2127 case INDEX_op_discard:
2129 TCGTemp *ts;
2130 ts = &s->temps[args[0]];
2131 /* mark the temporary as dead */
2132 if (!ts->fixed_reg) {
2133 if (ts->val_type == TEMP_VAL_REG)
2134 s->reg_to_temp[ts->reg] = -1;
2135 ts->val_type = TEMP_VAL_DEAD;
2138 break;
2139 case INDEX_op_set_label:
2140 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2141 tcg_out_label(s, args[0], s->code_ptr);
2142 break;
2143 case INDEX_op_call:
2144 dead_args = s->op_dead_args[op_index];
2145 args += tcg_reg_alloc_call(s, def, opc, args, dead_args);
2146 goto next;
2147 case INDEX_op_end:
2148 goto the_end;
2149 default:
2150 /* Sanity check that we've not introduced any unhandled opcodes. */
2151 if (def->flags & TCG_OPF_NOT_PRESENT) {
2152 tcg_abort();
2154 /* Note: in order to speed up the code, it would be much
2155 faster to have specialized register allocator functions for
2156 some common argument patterns */
2157 dead_args = s->op_dead_args[op_index];
2158 tcg_reg_alloc_op(s, def, opc, args, dead_args);
2159 break;
2161 args += def->nb_args;
2162 next:
2163 if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
2164 return op_index;
2166 op_index++;
2167 #ifndef NDEBUG
2168 check_regs(s);
2169 #endif
2171 the_end:
2172 return -1;
2175 int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf)
2177 #ifdef CONFIG_PROFILER
2179 int n;
2180 n = (gen_opc_ptr - gen_opc_buf);
2181 s->op_count += n;
2182 if (n > s->op_count_max)
2183 s->op_count_max = n;
2185 s->temp_count += s->nb_temps;
2186 if (s->nb_temps > s->temp_count_max)
2187 s->temp_count_max = s->nb_temps;
2189 #endif
2191 tcg_gen_code_common(s, gen_code_buf, -1);
2193 /* flush instruction cache */
2194 flush_icache_range((tcg_target_ulong)gen_code_buf,
2195 (tcg_target_ulong)s->code_ptr);
2197 return s->code_ptr - gen_code_buf;
2198 }
2200 /* Return the index of the first micro operation whose generated code
2201 extends past 'offset' bytes from the start of the TB, i.e. the op that
2202 covers the host PC at that offset. The contents of gen_code_buf must not
2203 be changed, though writing the same values is ok. Return -1 if not found. */
2204 int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
2205 {
2206 return tcg_gen_code_common(s, gen_code_buf, offset);
2207 }
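#if 0
/* Illustrative sketch only (kept out of the build): how a caller that needs
   to map a faulting host PC back to a micro-op index might use this, roughly
   in the spirit of cpu_restore_state().  'TranslationBlock' and its tc_ptr
   field are assumed from the surrounding QEMU code; 'searched_pc' is a
   hypothetical name for this example. */
static int example_search_pc(TCGContext *s, TranslationBlock *tb,
                             uintptr_t searched_pc)
{
    uintptr_t tc_ptr = (uintptr_t)tb->tc_ptr;

    if (searched_pc < tc_ptr) {
        return -1;
    }
    /* Re-emits the same host code and returns the index of the op whose
       output covers searched_pc, or -1 if it lies outside the TB. */
    return tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
}
#endif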
2209 #ifdef CONFIG_PROFILER
2210 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2211 {
2212 TCGContext *s = &tcg_ctx;
2213 int64_t tot;
2215 tot = s->interm_time + s->code_time;
2216 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2217 tot, tot / 2.4e9);
2218 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2219 s->tb_count,
2220 s->tb_count1 - s->tb_count,
2221 s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
2222 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2223 s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
2224 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2225 s->tb_count ?
2226 (double)s->del_op_count / s->tb_count : 0);
2227 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2228 s->tb_count ?
2229 (double)s->temp_count / s->tb_count : 0,
2230 s->temp_count_max);
2232 cpu_fprintf(f, "cycles/op %0.1f\n",
2233 s->op_count ? (double)tot / s->op_count : 0);
2234 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2235 s->code_in_len ? (double)tot / s->code_in_len : 0);
2236 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2237 s->code_out_len ? (double)tot / s->code_out_len : 0);
2238 if (tot == 0)
2239 tot = 1;
2240 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2241 (double)s->interm_time / tot * 100.0);
2242 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2243 (double)s->code_time / tot * 100.0);
2244 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2245 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2246 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2247 s->restore_count);
2248 cpu_fprintf(f, " avg cycles %0.1f\n",
2249 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2251 dump_op_count();
2252 }
2253 #else
2254 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2255 {
2256 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2257 }
2258 #endif
2260 #ifdef ELF_HOST_MACHINE
2261 /* In order to use this feature, the backend needs to do three things:
2263 (1) Define ELF_HOST_MACHINE to indicate both what value to
2264 put into the ELF image and to indicate support for the feature.
2266 (2) Define tcg_register_jit. This should create a buffer containing
2267 the contents of a .debug_frame section that describes the post-
2268 prologue unwind info for the tcg machine.
2270 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2271 */
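#if 0
/* Illustrative sketch only (kept out of the build) of step (2) for a
   hypothetical backend: hand a prebuilt .debug_frame blob describing the
   prologue's unwind info to tcg_register_jit_int() along with the code
   buffer.  The bytes below are a placeholder, not valid DWARF; a real
   backend also patches the FDE's initial-location field to point at buf. */
void tcg_register_jit(void *buf, size_t buf_size)
{
    static uint8_t debug_frame[] = {
        0   /* CIE + FDE describing the prologue would go here. */
    };

    tcg_register_jit_int(buf, buf_size, debug_frame, sizeof(debug_frame));
}
#endif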
2273 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2274 typedef enum {
2275 JIT_NOACTION = 0,
2276 JIT_REGISTER_FN,
2277 JIT_UNREGISTER_FN
2278 } jit_actions_t;
2280 struct jit_code_entry {
2281 struct jit_code_entry *next_entry;
2282 struct jit_code_entry *prev_entry;
2283 const void *symfile_addr;
2284 uint64_t symfile_size;
2285 };
2287 struct jit_descriptor {
2288 uint32_t version;
2289 uint32_t action_flag;
2290 struct jit_code_entry *relevant_entry;
2291 struct jit_code_entry *first_entry;
2292 };
2294 void __jit_debug_register_code(void) __attribute__((noinline));
2295 void __jit_debug_register_code(void)
2296 {
2297 asm("");
2298 }
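/* GDB's JIT interface places a breakpoint on __jit_debug_register_code and
   re-reads __jit_debug_descriptor whenever it is hit; the noinline attribute
   and the empty asm keep the function and the call to it from being inlined
   or optimized away. */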
2300 /* Must statically initialize the version, because GDB may check
2301 the version before we can set it. */
2302 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2304 /* End GDB interface. */
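/* Return the offset of 'str' within the NUL-separated string table 'strtab'.
   Callers must only ask for strings that are actually present (img->str
   below); there is deliberately no bounds check. */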
2306 static int find_string(const char *strtab, const char *str)
2307 {
2308 const char *p = strtab + 1;
2310 while (1) {
2311 if (strcmp(p, str) == 0) {
2312 return p - strtab;
2313 }
2314 p += strlen(p) + 1;
2315 }
2316 }
2318 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2319 void *debug_frame, size_t debug_frame_size)
2320 {
2321 struct __attribute__((packed)) DebugInfo {
2322 uint32_t len;
2323 uint16_t version;
2324 uint32_t abbrev;
2325 uint8_t ptr_size;
2326 uint8_t cu_die;
2327 uint16_t cu_lang;
2328 uintptr_t cu_low_pc;
2329 uintptr_t cu_high_pc;
2330 uint8_t fn_die;
2331 char fn_name[16];
2332 uintptr_t fn_low_pc;
2333 uintptr_t fn_high_pc;
2334 uint8_t cu_eoc;
2335 };
2337 struct ElfImage {
2338 ElfW(Ehdr) ehdr;
2339 ElfW(Phdr) phdr;
2340 ElfW(Shdr) shdr[7];
2341 ElfW(Sym) sym[2];
2342 struct DebugInfo di;
2343 uint8_t da[24];
2344 char str[80];
2345 };
2347 struct ElfImage *img;
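/* The fake ELF image is built as a single allocation: the fixed ElfImage
   block above, immediately followed in memory by the copied .debug_frame
   contents, so the section headers can use plain offsets into the buffer. */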
2349 static const struct ElfImage img_template = {
2350 .ehdr = {
2351 .e_ident[EI_MAG0] = ELFMAG0,
2352 .e_ident[EI_MAG1] = ELFMAG1,
2353 .e_ident[EI_MAG2] = ELFMAG2,
2354 .e_ident[EI_MAG3] = ELFMAG3,
2355 .e_ident[EI_CLASS] = ELF_CLASS,
2356 .e_ident[EI_DATA] = ELF_DATA,
2357 .e_ident[EI_VERSION] = EV_CURRENT,
2358 .e_type = ET_EXEC,
2359 .e_machine = ELF_HOST_MACHINE,
2360 .e_version = EV_CURRENT,
2361 .e_phoff = offsetof(struct ElfImage, phdr),
2362 .e_shoff = offsetof(struct ElfImage, shdr),
2363 .e_ehsize = sizeof(ElfW(Ehdr)),
2364 .e_phentsize = sizeof(ElfW(Phdr)),
2365 .e_phnum = 1,
2366 .e_shentsize = sizeof(ElfW(Shdr)),
2367 .e_shnum = ARRAY_SIZE(img->shdr),
2368 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2369 #ifdef ELF_HOST_FLAGS
2370 .e_flags = ELF_HOST_FLAGS,
2371 #endif
2372 #ifdef ELF_OSABI
2373 .e_ident[EI_OSABI] = ELF_OSABI,
2374 #endif
2375 },
2376 .phdr = {
2377 .p_type = PT_LOAD,
2378 .p_flags = PF_X,
2379 },
2380 .shdr = {
2381 [0] = { .sh_type = SHT_NULL },
2382 /* Trick: The contents of code_gen_buffer are not present in
2383 this fake ELF file; that got allocated elsewhere. Therefore
2384 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2385 will not look for contents. We can record any address. */
2386 [1] = { /* .text */
2387 .sh_type = SHT_NOBITS,
2388 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2389 },
2390 [2] = { /* .debug_info */
2391 .sh_type = SHT_PROGBITS,
2392 .sh_offset = offsetof(struct ElfImage, di),
2393 .sh_size = sizeof(struct DebugInfo),
2394 },
2395 [3] = { /* .debug_abbrev */
2396 .sh_type = SHT_PROGBITS,
2397 .sh_offset = offsetof(struct ElfImage, da),
2398 .sh_size = sizeof(img->da),
2399 },
2400 [4] = { /* .debug_frame */
2401 .sh_type = SHT_PROGBITS,
2402 .sh_offset = sizeof(struct ElfImage),
2403 },
2404 [5] = { /* .symtab */
2405 .sh_type = SHT_SYMTAB,
2406 .sh_offset = offsetof(struct ElfImage, sym),
2407 .sh_size = sizeof(img->sym),
2408 .sh_info = 1,
2409 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2410 .sh_entsize = sizeof(ElfW(Sym)),
2411 },
2412 [6] = { /* .strtab */
2413 .sh_type = SHT_STRTAB,
2414 .sh_offset = offsetof(struct ElfImage, str),
2415 .sh_size = sizeof(img->str),
2416 },
2417 },
2418 .sym = {
2419 [1] = { /* code_gen_buffer */
2420 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2421 .st_shndx = 1,
2422 },
2423 },
2424 .di = {
2425 .len = sizeof(struct DebugInfo) - 4,
2426 .version = 2,
2427 .ptr_size = sizeof(void *),
2428 .cu_die = 1,
2429 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
2430 .fn_die = 2,
2431 .fn_name = "code_gen_buffer"
2432 },
2433 .da = {
2434 1, /* abbrev number (the cu) */
2435 0x11, 1, /* DW_TAG_compile_unit, has children */
2436 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2437 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2438 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2439 0, 0, /* end of abbrev */
2440 2, /* abbrev number (the fn) */
2441 0x2e, 0, /* DW_TAG_subprogram, no children */
2442 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2443 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2444 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2445 0, 0, /* end of abbrev */
2446 0 /* no more abbrev */
2447 },
2448 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2449 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2450 };
2452 /* We only need a single jit entry; statically allocate it. */
2453 static struct jit_code_entry one_entry;
2455 uintptr_t buf = (uintptr_t)buf_ptr;
2456 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2458 img = g_malloc(img_size);
2459 *img = img_template;
2460 memcpy(img + 1, debug_frame, debug_frame_size);
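/* Patch in the fields that depend on the runtime placement and size of the
   code buffer; everything else was filled in by the static template above. */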
2462 img->phdr.p_vaddr = buf;
2463 img->phdr.p_paddr = buf;
2464 img->phdr.p_memsz = buf_size;
2466 img->shdr[1].sh_name = find_string(img->str, ".text");
2467 img->shdr[1].sh_addr = buf;
2468 img->shdr[1].sh_size = buf_size;
2470 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2471 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2473 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2474 img->shdr[4].sh_size = debug_frame_size;
2476 img->shdr[5].sh_name = find_string(img->str, ".symtab");
2477 img->shdr[6].sh_name = find_string(img->str, ".strtab");
2479 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2480 img->sym[1].st_value = buf;
2481 img->sym[1].st_size = buf_size;
2483 img->di.cu_low_pc = buf;
2484 img->di.cu_high_pc = buf + buf_size;
2485 img->di.fn_low_pc = buf;
2486 img->di.fn_high_pc = buf + buf_size;
2488 #ifdef DEBUG_JIT
2489 /* Enable this block to be able to debug the ELF image file creation.
2490 One can use readelf, objdump, or other inspection utilities. */
2491 {
2492 FILE *f = fopen("/tmp/qemu.jit", "w+b");
2493 if (f) {
2494 if (fwrite(img, img_size, 1, f) != 1) {
2495 /* Avoid stupid unused return value warning for fwrite. */
2496 }
2497 fclose(f);
2498 }
2499 }
2500 #endif
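/* Publish the symfile and tell any attached debugger about it: point the
   descriptor at our single entry, mark the registration action, and hit the
   hook that GDB traps. */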
2502 one_entry.symfile_addr = img;
2503 one_entry.symfile_size = img_size;
2505 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2506 __jit_debug_descriptor.relevant_entry = &one_entry;
2507 __jit_debug_descriptor.first_entry = &one_entry;
2508 __jit_debug_register_code();
2509 }
2510 #else
2511 /* No support for the feature. Provide the entry point expected by exec.c,
2512 and implement the internal function we declared earlier. */
2514 static void tcg_register_jit_int(void *buf, size_t size,
2515 void *debug_frame, size_t debug_frame_size)
2516 {
2517 }
2519 void tcg_register_jit(void *buf, size_t buf_size)
2520 {
2521 }
2522 #endif /* ELF_HOST_MACHINE */