/*
 *  Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE      /* Used internally by macros.h */
#include "macros.h"
#include "mmvec/macros.h"
#undef QEMU_GENERATE
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"
#include "genptr.h"
TCGv gen_read_reg(TCGv result, int num)
{
    tcg_gen_mov_tl(result, hex_gpr[num]);
    return result;
}
TCGv gen_read_preg(TCGv pred, uint8_t num)
{
    tcg_gen_mov_tl(pred, hex_pred[num]);
    return pred;
}
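/*
 * Mask of the bits in each register that must not be changed by a register
 * write.  A mask of IMMUTABLE marks the whole register as read-only.
 */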
#define IMMUTABLE (~0)

static const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = {
    [HEX_REG_USR] = 0xc13000c0,
    [HEX_REG_PC] = IMMUTABLE,
    [HEX_REG_UPCYCLELO] = IMMUTABLE,
    [HEX_REG_UPCYCLEHI] = IMMUTABLE,
    [HEX_REG_UTIMERLO] = IMMUTABLE,
    [HEX_REG_UTIMERHI] = IMMUTABLE,
};
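/* Combine new_val with cur_val so bits covered by reg_mask keep their current value */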
static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val,
                                        target_ulong reg_mask)
{
    if (reg_mask) {
        TCGv tmp = tcg_temp_new();

        /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */
        tcg_gen_andi_tl(new_val, new_val, ~reg_mask);
        tcg_gen_andi_tl(tmp, cur_val, reg_mask);
        tcg_gen_or_tl(new_val, new_val, tmp);
    }
}
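/* Return the TCGv that holds the end-of-packet (new) value of a GPR */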
static TCGv get_result_gpr(DisasContext *ctx, int rnum)
{
    return hex_new_value[rnum];
}
static TCGv_i64 get_result_gpr_pair(DisasContext *ctx, int rnum)
{
    TCGv_i64 result = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(result, hex_new_value[rnum],
                                   hex_new_value[rnum + 1]);
    return result;
}
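/*
 * Log a register write: the value is staged in hex_new_value and becomes
 * visible in hex_gpr when the packet commits.
 */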
void gen_log_reg_write(int rnum, TCGv val)
{
    const target_ulong reg_mask = reg_immut_masks[rnum];

    gen_masked_reg_write(val, hex_gpr[rnum], reg_mask);
    tcg_gen_mov_tl(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }
}
static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
{
    const target_ulong reg_mask_low = reg_immut_masks[rnum];
    const target_ulong reg_mask_high = reg_immut_masks[rnum + 1];
    TCGv val32 = tcg_temp_new();

    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    gen_masked_reg_write(val32, hex_gpr[rnum], reg_mask_low);
    tcg_gen_mov_tl(hex_new_value[rnum], val32);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }

    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    gen_masked_reg_write(val32, hex_gpr[rnum + 1], reg_mask_high);
    tcg_gen_mov_tl(hex_new_value[rnum + 1], val32);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
    }
}
void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
{
    TCGv base_val = tcg_temp_new();

    tcg_gen_andi_tl(base_val, val, 0xff);

    /*
     * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
     *
     * Multiple writes to the same preg are and'ed together
     * If this is the first predicate write in the packet, do a
     * straight assignment.  Otherwise, do an and.
     */
    if (!test_bit(pnum, ctx->pregs_written)) {
        tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
    } else {
        tcg_gen_and_tl(hex_new_pred_value[pnum],
                       hex_new_pred_value[pnum], base_val);
    }
    tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);
    set_bit(pnum, ctx->pregs_written);
}
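/* Pack predicate registers P0-P3 into the bytes of a P3:0 control value */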
static inline void gen_read_p3_0(TCGv control_reg)
{
    tcg_gen_movi_tl(control_reg, 0);
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
    }
}
/*
 * Certain control registers require special handling on read
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> concat the 4 predicate registers together
 *     HEX_REG_PC            actual value stored in DisasContext
 *                           -> assign from ctx->base.pc_next
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> add current TB changes to existing reg value
 */
static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
                                     TCGv dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_read_p3_0(dest);
    } else if (reg_num == HEX_REG_PC) {
        tcg_gen_movi_tl(dest, ctx->base.pc_next);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
    } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
    } else {
        tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
    }
}
static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
                                          TCGv_i64 dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv p3_0 = tcg_temp_new();
        gen_read_p3_0(p3_0);
        tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
    } else if (reg_num == HEX_REG_PC - 1) {
        TCGv pc = tcg_constant_tl(ctx->base.pc_next);
        tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        TCGv pkt_cnt = tcg_temp_new();
        TCGv insn_cnt = tcg_temp_new();
        tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
        tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
        tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        TCGv hvx_cnt = tcg_temp_new();
        tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
        tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
    } else {
        tcg_gen_concat_i32_i64(dest,
                               hex_gpr[reg_num],
                               hex_gpr[reg_num + 1]);
    }
}
static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg)
{
    TCGv hex_p8 = tcg_temp_new();
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
        gen_log_pred_write(ctx, i, hex_p8);
    }
}
/*
 * Certain control registers require special handling on write
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> break the value across 4 predicate registers
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> clear the changes
 */
static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
                                      TCGv val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_write_p3_0(ctx, val);
    } else {
        gen_log_reg_write(reg_num, val);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
        }
        if (reg_num == HEX_REG_QEMU_INSN_CNT) {
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}
static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
                                           TCGv_i64 val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv result = get_result_gpr(ctx, reg_num + 1);
        TCGv val32 = tcg_temp_new();
        tcg_gen_extrl_i64_i32(val32, val);
        gen_write_p3_0(ctx, val32);
        tcg_gen_extrh_i64_i32(val32, val);
        tcg_gen_mov_tl(result, val32);
    } else {
        gen_log_reg_write_pair(reg_num, val);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}
TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 8, 8);
    } else {
        tcg_gen_extract_tl(result, src, N * 8, 8);
    }
    return result;
}
TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
{
    TCGv_i64 res64 = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_sextract_i64(res64, src, N * 8, 8);
    } else {
        tcg_gen_extract_i64(res64, src, N * 8, 8);
    }
    tcg_gen_extrl_i64_i32(result, res64);

    return result;
}
TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 16, 16);
    } else {
        tcg_gen_extract_tl(result, src, N * 16, 16);
    }
    return result;
}
void gen_set_half(int N, TCGv result, TCGv src)
{
    tcg_gen_deposit_tl(result, result, src, N * 16, 16);
}
void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
}
void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
}
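/* Load-locked: remember the address and loaded value for a later store-conditional */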
static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_tl(hex_llsc_val, dest);
}
static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}
static inline void gen_store_conditional4(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_tl(0xff);
    zero = tcg_constant_tl(0);
    tmp = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
                              ctx->mem_idx, MO_32);
    tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
                       one, zero);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}
static inline void gen_store_conditional8(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv_i64 src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv_i64 one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_i64(0xff);
    zero = tcg_constant_i64(0);
    tmp = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
                               ctx->mem_idx, MO_64);
    tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
                        one, zero);
    tcg_gen_extrl_i64_i32(pred, tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}
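/*
 * Scalar stores are logged into per-slot buffers (address, width, value)
 * and performed when the packet commits.
 */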
void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], width);
    tcg_gen_mov_tl(hex_store_val32[slot], src);
}
void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 1, slot);
}

void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store1(cpu_env, vaddr, tmp, slot);
}

void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 2, slot);
}

void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store2(cpu_env, vaddr, tmp, slot);
}

void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 4, slot);
}

void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store4(cpu_env, vaddr, tmp, slot);
}

void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], 8);
    tcg_gen_mov_i64(hex_store_val64[slot], src);
}

void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot)
{
    TCGv_i64 tmp = tcg_constant_i64(src);
    gen_store8(cpu_env, vaddr, tmp, slot);
}
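/* Produce the 8-bit predicate encoding: 0xff if value is non-zero, else 0 */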
TCGv gen_8bitsof(TCGv result, TCGv value)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv ones = tcg_constant_tl(0xff);
    tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);

    return result;
}
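/* Write the PC for a (possibly conditional) change of flow to a run-time address */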
static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr,
                                  TCGCond cond, TCGv pred)
{
    TCGLabel *pred_false = NULL;
    if (cond != TCG_COND_ALWAYS) {
        pred_false = gen_new_label();
        tcg_gen_brcondi_tl(cond, pred, 0, pred_false);
    }

    if (ctx->pkt->pkt_has_multi_cof) {
        /* If there are multiple branches in a packet, ignore the second one */
        tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC],
                           hex_branch_taken, tcg_constant_tl(0),
                           hex_gpr[HEX_REG_PC], addr);
        tcg_gen_movi_tl(hex_branch_taken, 1);
    } else {
        tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr);
    }

    if (cond != TCG_COND_ALWAYS) {
        gen_set_label(pred_false);
    }
}
static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off,
                                   TCGCond cond, TCGv pred)
{
    target_ulong dest = ctx->pkt->pc + pc_off;
    if (ctx->pkt->pkt_has_multi_cof) {
        gen_write_new_pc_addr(ctx, tcg_constant_tl(dest), cond, pred);
    } else {
        /* Defer this jump to the end of the TB */
        ctx->branch_cond = TCG_COND_ALWAYS;
        if (pred != NULL) {
            ctx->branch_cond = cond;
            tcg_gen_mov_tl(hex_branch_taken, pred);
        }
        ctx->branch_dest = dest;
    }
}
void gen_set_usr_field(DisasContext *ctx, int field, TCGv val)
{
    TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
    tcg_gen_deposit_tl(usr, usr, val,
                       reg_field_info[field].offset,
                       reg_field_info[field].width);
}
void gen_set_usr_fieldi(DisasContext *ctx, int field, int x)
{
    if (reg_field_info[field].width == 1) {
        TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
        target_ulong bit = 1 << reg_field_info[field].offset;
        if ((x & 1) == 1) {
            tcg_gen_ori_tl(usr, usr, bit);
        } else {
            tcg_gen_andi_tl(usr, usr, ~bit);
        }
    } else {
        TCGv val = tcg_constant_tl(x);
        gen_set_usr_field(ctx, field, val);
    }
}
static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2)
{
    TCGv one = tcg_constant_tl(0xff);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero);
}
static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc,
                           TCGCond cond, TCGv pred)
{
    gen_write_new_pc_addr(ctx, dst_pc, cond, pred);
}

static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred)
{
    TCGv LSB = tcg_temp_new();
    tcg_gen_andi_tl(LSB, pred, 1);
    gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB);
}

static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred,
                          int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, cond, pred);
}
static void gen_cmpnd_cmp_jmp(DisasContext *ctx,
                              int pnum, TCGCond cond1, TCGv arg1, TCGv arg2,
                              TCGCond cond2, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        gen_compare(cond1, pred, arg1, arg2);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
        gen_cond_jump(ctx, cond2, pred, pc_off);
    }
}
static void gen_cmpnd_cmp_jmp_t(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_EQ, pc_off);
}

static void gen_cmpnd_cmp_jmp_f(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_NE, pc_off);
}

static void gen_cmpnd_cmpi_jmp_t(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_EQ, pc_off);
}

static void gen_cmpnd_cmpi_jmp_f(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_NE, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_t(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_t(ctx, pnum, cond, arg, -1, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_f(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_f(ctx, pnum, cond, arg, -1, pc_off);
}
static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx,
                                  int pnum, TCGv arg, TCGCond cond, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        tcg_gen_andi_tl(pred, arg, 1);
        gen_8bitsof(pred, pred);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
        gen_cond_jump(ctx, cond, pred, pc_off);
    }
}
static void gen_testbit0_jumpnv(DisasContext *ctx,
                                TCGv arg, TCGCond cond, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_andi_tl(pred, arg, 1);
    gen_cond_jump(ctx, cond, pred, pc_off);
}
static void gen_jump(DisasContext *ctx, int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

static void gen_jumpr(DisasContext *ctx, TCGv new_pc)
{
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}

static void gen_call(DisasContext *ctx, int pc_off)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

static void gen_callr(DisasContext *ctx, TCGv new_pc)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}
static void gen_cond_call(DisasContext *ctx, TCGv pred,
                          TCGCond cond, int pc_off)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_set_label(skip);
}

static void gen_cond_callr(DisasContext *ctx,
                           TCGCond cond, TCGv pred, TCGv new_pc)
{
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    gen_callr(ctx, new_pc);
    gen_set_label(skip);
}
/* frame ^= (int64_t)FRAMEKEY << 32 */
static void gen_frame_unscramble(TCGv_i64 frame)
{
    TCGv_i64 framekey = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(framekey, hex_gpr[HEX_REG_FRAMEKEY]);
    tcg_gen_shli_i64(framekey, framekey, 32);
    tcg_gen_xor_i64(frame, frame, framekey);
}

static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
{
    Insn *insn = ctx->insn;  /* Needed for CHECK_NOSHUF */
    CHECK_NOSHUF(EA, 8);
    tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx);
}
static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
{
    /*
     * frame = *src
     * dst = frame_unscramble(frame)
     * SP = src + 8
     * PC = dst.w[1]
     */
    TCGv_i64 frame = tcg_temp_new_i64();
    TCGv r31 = tcg_temp_new();
    TCGv r29 = get_result_gpr(ctx, HEX_REG_SP);

    gen_load_frame(ctx, frame, src);
    gen_frame_unscramble(frame);
    tcg_gen_mov_i64(dst, frame);
    tcg_gen_addi_tl(r29, src, 8);
    tcg_gen_extrh_i64_i32(r31, dst);
    gen_jumpr(ctx, r31);
}
/* if (pred) dst = dealloc_return(src):raw */
static void gen_cond_return(DisasContext *ctx, TCGv_i64 dst, TCGv src,
                            TCGv pred, TCGCond cond)
{
    TCGv LSB = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(LSB, pred, 1);

    tcg_gen_brcondi_tl(cond, LSB, 0, skip);
    gen_return(ctx, dst, src);
    gen_set_label(skip);
}
718 static void gen_cond_return_subinsn(DisasContext
*ctx
, TCGCond cond
, TCGv pred
)
720 TCGv_i64 RddV
= get_result_gpr_pair(ctx
, HEX_REG_FP
);
721 gen_cond_return(ctx
, RddV
, hex_gpr
[HEX_REG_FP
], pred
, cond
);
722 gen_log_reg_write_pair(HEX_REG_FP
, RddV
);
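/* End-of-packet processing for hardware loop 0 (endloop0) */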
static void gen_endloop0(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     * if (lpcfg == 1) {
     *     hex_new_pred_value[3] = 0xff;
     *     hex_pred_written |= 1 << 3;
     * }
     */
    TCGLabel *label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
        tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
    }
    gen_set_label(label1);

    /*
     * if (lpcfg) {
     *     SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     * }
     */
    TCGLabel *label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        gen_set_usr_field(ctx, USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     * If we're in a tight loop, we'll do this at the end of the TB to take
     * advantage of direct block chaining.
     */
    if (!ctx->is_tight_loop) {
        /*
         * if (hex_gpr[HEX_REG_LC0] > 1) {
         *     PC = hex_gpr[HEX_REG_SA0];
         *     hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
         * }
         */
        TCGLabel *label3 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
        {
            TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
            gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
            tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
        }
        gen_set_label(label3);
    }
}
static void gen_endloop1(DisasContext *ctx)
{
    /*
     * if (hex_gpr[HEX_REG_LC1] > 1) {
     *     PC = hex_gpr[HEX_REG_SA1];
     *     hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1;
     * }
     */
    TCGLabel *label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, label);
    {
        TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
        tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
    }
    gen_set_label(label);
}
static void gen_endloop01(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();
    TCGLabel *label1 = gen_new_label();
    TCGLabel *label2 = gen_new_label();
    TCGLabel *label3 = gen_new_label();
    TCGLabel *done = gen_new_label();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     * if (lpcfg == 1) {
     *     hex_new_pred_value[3] = 0xff;
     *     hex_pred_written |= 1 << 3;
     * }
     */
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
        tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
    }
    gen_set_label(label1);

    /*
     * if (lpcfg) {
     *     SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     * }
     */
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        gen_set_usr_field(ctx, USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     * if (hex_gpr[HEX_REG_LC0] > 1) {
     *     PC = hex_gpr[HEX_REG_SA0];
     *     hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
     * } else {
     *     if (hex_gpr[HEX_REG_LC1] > 1) {
     *         hex_next_pc = hex_gpr[HEX_REG_SA1];
     *         hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1;
     *     }
     * }
     */
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
    {
        TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
        tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
        tcg_gen_br(done);
    }
    gen_set_label(label3);
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, done);
    {
        TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
        tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
    }
    gen_set_label(done);
}
static void gen_cmp_jumpnv(DisasContext *ctx,
                           TCGCond cond, TCGv val, TCGv src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcond_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

static void gen_cmpi_jumpnv(DisasContext *ctx,
                            TCGCond cond, TCGv val, int src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcondi_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}
/* Shift left with saturation */
static void gen_shl_sat(DisasContext *ctx, TCGv dst, TCGv src, TCGv shift_amt)
{
    TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
    TCGv sh32 = tcg_temp_new();
    TCGv dst_sar = tcg_temp_new();
    TCGv ovf = tcg_temp_new();
    TCGv satval = tcg_temp_new();
    TCGv min = tcg_constant_tl(0x80000000);
    TCGv max = tcg_constant_tl(0x7fffffff);

    /*
     * Possible values for shift_amt are 0 .. 64
     * We need special handling for values above 31
     *
     * sh32 = shift & 31;
     * dst = sh32 == shift ? src : 0;
     * dst <<= sh32;
     * dst_sar = dst >> sh32;
     * satval = src < 0 ? min : max;
     * if (dst_sar != src) {
     *     usr.OVF |= 1;
     *     dst = satval;
     * }
     */

    tcg_gen_andi_tl(sh32, shift_amt, 31);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, sh32, shift_amt,
                       src, tcg_constant_tl(0));
    tcg_gen_shl_tl(dst, dst, sh32);
    tcg_gen_sar_tl(dst_sar, dst, sh32);
    tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max);

    tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src);
    tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset);
    tcg_gen_or_tl(usr, usr, ovf);

    tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, dst, satval);
}
static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt)
{
    /*
     * Shift arithmetic right
     * Robust when shift_amt is >31 bits
     */
    TCGv tmp = tcg_temp_new();
    tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31));
    tcg_gen_sar_tl(dst, src, tmp);
}
/* Bidirectional shift right with saturation */
static void gen_asr_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift left */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_shl_sat(ctx, RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift right */
    gen_sar(RdV, RsV, shift_amt);

    gen_set_label(done);
}
/* Bidirectional shift left with saturation */
static void gen_asl_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift right */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_sar(RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift left */
    gen_shl_sat(ctx, RdV, RsV, shift_amt);

    gen_set_label(done);
}
static intptr_t vreg_src_off(DisasContext *ctx, int num)
{
    intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);

    if (test_bit(num, ctx->vregs_select)) {
        offset = ctx_future_vreg_off(ctx, num, 1, false);
    }
    if (test_bit(num, ctx->vregs_updated_tmp)) {
        offset = ctx_tmp_vreg_off(ctx, num, 1, false);
    }
    return offset;
}
static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
                               VRegWriteType type)
{
    intptr_t dstoff;

    if (type != EXT_TMP) {
        dstoff = ctx_future_vreg_off(ctx, num, 1, true);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    } else {
        dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    }
}

static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
                                    VRegWriteType type)
{
    gen_log_vreg_write(ctx, srcoff, num ^ 0, type);
    srcoff += sizeof(MMVector);
    gen_log_vreg_write(ctx, srcoff, num ^ 1, type);
}
static intptr_t get_result_qreg(DisasContext *ctx, int qnum)
{
    return offsetof(CPUHexagonState, future_QRegs[qnum]);
}
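/* Load a full HVX vector from memory into CPU state at dstoff, 8 bytes at a time */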
static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
                          bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
    }
}
static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                           int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    if (is_gather_store_insn(ctx)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(cpu_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}
static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}
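/*
 * Convert a vector to a predicate (QReg): each mask bit is set when the
 * corresponding size-byte element of the vector is non-zero.
 */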
static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
    }
}
void probe_noshuf_load(TCGv va, int s, int mi)
{
    TCGv size = tcg_constant_tl(s);
    TCGv mem_idx = tcg_constant_tl(mi);
    gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
}
/*
 * Note: Since this function might branch, `val` is
 * required to be a `tcg_temp_local`.
 */
void gen_set_usr_field_if(DisasContext *ctx, int field, TCGv val)
{
    /* Sets the USR field if `val` is non-zero */
    if (reg_field_info[field].width == 1) {
        TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
        TCGv tmp = tcg_temp_new();
        tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width);
        tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset);
        tcg_gen_or_tl(usr, usr, tmp);
    } else {
        TCGLabel *skip_label = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label);
        gen_set_usr_field(ctx, field, val);
        gen_set_label(skip_label);
    }
}
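/* Saturate a signed value to a given bit width */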
void gen_sat_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << (width - 1)) - 1);
    TCGv min_val = tcg_constant_tl(-(1 << (width - 1)));
    tcg_gen_smin_tl(dest, source, max_val);
    tcg_gen_smax_tl(dest, dest, min_val);
}

void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    gen_sat_i32(dest, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
}
void gen_satu_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << width) - 1);
    TCGv zero = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_GTU, dest, source, max_val, max_val, source);
    tcg_gen_movcond_tl(TCG_COND_LT, dest, source, zero, zero, dest);
}

void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    gen_satu_i32(dest, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
}
void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << (width - 1)) - 1LL);
    TCGv_i64 min_val = tcg_constant_i64(-(1LL << (width - 1)));
    tcg_gen_smin_i64(dest, source, max_val);
    tcg_gen_smax_i64(dest, dest, min_val);
}

void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 ovfl_64;
    gen_sat_i64(dest, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}
void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GTU, dest, source, max_val, max_val, source);
    tcg_gen_movcond_i64(TCG_COND_LT, dest, source, zero, zero, dest);
}

void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 ovfl_64;
    gen_satu_i64(dest, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}
/* Implements the fADDSAT64 macro in TCG */
void gen_add_sat_i64(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 sum = tcg_temp_new_i64();
    TCGv_i64 xor = tcg_temp_new_i64();
    TCGv_i64 cond1 = tcg_temp_new_i64();
    TCGv_i64 cond2 = tcg_temp_new_i64();
    TCGv_i64 cond3 = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
    TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);
    TCGv_i64 max_neg = tcg_constant_i64(0x8000000000000000LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGLabel *no_ovfl_label = gen_new_label();
    TCGLabel *ovfl_label = gen_new_label();
    TCGLabel *ret_label = gen_new_label();

    tcg_gen_add_i64(sum, a, b);
    tcg_gen_xor_i64(xor, a, b);

    /* if (xor & mask) */
    tcg_gen_and_i64(cond1, xor, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label);

    /* else if ((a ^ sum) & mask) */
    tcg_gen_xor_i64(cond2, a, sum);
    tcg_gen_and_i64(cond2, cond2, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label);
    /* fallthrough to no_ovfl_label branch */

    /* if branch */
    gen_set_label(no_ovfl_label);
    tcg_gen_mov_i64(ret, sum);
    tcg_gen_br(ret_label);

    /* else if branch */
    gen_set_label(ovfl_label);
    tcg_gen_and_i64(cond3, sum, mask);
    tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg);
    gen_set_usr_fieldi(ctx, USR_OVF, 1);

    gen_set_label(ret_label);
}
#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"