/*
 *  Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE /* Used internally by macros.h */
#include "macros.h"
#include "mmvec/macros.h"
#undef QEMU_GENERATE
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"
#include "genptr.h"

TCGv gen_read_reg(TCGv result, int num)
{
    tcg_gen_mov_tl(result, hex_gpr[num]);
    return result;
}

TCGv gen_read_preg(TCGv pred, uint8_t num)
{
    tcg_gen_mov_tl(pred, hex_pred[num]);
    return pred;
}

#define IMMUTABLE (~0)

const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = {
    [HEX_REG_USR] = 0xc13000c0,
    [HEX_REG_PC] = IMMUTABLE,
    [HEX_REG_UPCYCLELO] = IMMUTABLE,
    [HEX_REG_UPCYCLEHI] = IMMUTABLE,
    [HEX_REG_UTIMERLO] = IMMUTABLE,
    [HEX_REG_UTIMERHI] = IMMUTABLE,
};

static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val,
                                        target_ulong reg_mask)
{
    if (reg_mask) {
        TCGv tmp = tcg_temp_new();

        /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */
        tcg_gen_andi_tl(new_val, new_val, ~reg_mask);
        tcg_gen_andi_tl(tmp, cur_val, reg_mask);
        tcg_gen_or_tl(new_val, new_val, tmp);
    }
}
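
/*
 * Worked example for gen_masked_reg_write() above (illustrative, not from
 * the original source): with reg_mask = 0xff00, writing new_val = 0x1234
 * over cur_val = 0xabcd yields
 *     (0x1234 & ~0xff00) | (0xabcd & 0xff00) = 0x0034 | 0xab00 = 0xab34
 * i.e. the masked (immutable) bits keep their current value and only the
 * writable bits take the new value.
 */
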
TCGv get_result_gpr(DisasContext *ctx, int rnum)
{
    if (ctx->need_commit) {
        if (rnum == HEX_REG_USR) {
            return hex_new_value_usr;
        } else {
            if (ctx->new_value[rnum] == NULL) {
                ctx->new_value[rnum] = tcg_temp_new();
                tcg_gen_movi_tl(ctx->new_value[rnum], 0);
            }
            return ctx->new_value[rnum];
        }
    } else {
        return hex_gpr[rnum];
    }
}

static TCGv_i64 get_result_gpr_pair(DisasContext *ctx, int rnum)
{
    TCGv_i64 result = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(result, get_result_gpr(ctx, rnum),
                                   get_result_gpr(ctx, rnum + 1));
    return result;
}

void gen_log_reg_write(DisasContext *ctx, int rnum, TCGv val)
{
    const target_ulong reg_mask = reg_immut_masks[rnum];

    gen_masked_reg_write(val, hex_gpr[rnum], reg_mask);
    tcg_gen_mov_tl(get_result_gpr(ctx, rnum), val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }
}

static void gen_log_reg_write_pair(DisasContext *ctx, int rnum, TCGv_i64 val)
{
    TCGv val32 = tcg_temp_new();

    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    gen_log_reg_write(ctx, rnum, val32);

    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    gen_log_reg_write(ctx, rnum + 1, val32);
}

TCGv get_result_pred(DisasContext *ctx, int pnum)
{
    if (ctx->need_commit) {
        if (ctx->new_pred_value[pnum] == NULL) {
            ctx->new_pred_value[pnum] = tcg_temp_new();
            tcg_gen_movi_tl(ctx->new_pred_value[pnum], 0);
        }
        return ctx->new_pred_value[pnum];
    } else {
        return hex_pred[pnum];
    }
}

void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
{
    TCGv pred = get_result_pred(ctx, pnum);
    TCGv base_val = tcg_temp_new();

    tcg_gen_andi_tl(base_val, val, 0xff);

    /*
     * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
     *
     * Multiple writes to the same preg are and'ed together
     * If this is the first predicate write in the packet, do a
     * straight assignment.  Otherwise, do an and.
     */
    if (!test_bit(pnum, ctx->pregs_written)) {
        tcg_gen_mov_tl(pred, base_val);
    } else {
        tcg_gen_and_tl(pred, pred, base_val);
    }
    if (HEX_DEBUG) {
        tcg_gen_ori_tl(ctx->pred_written, ctx->pred_written, 1 << pnum);
    }
    set_bit(pnum, ctx->pregs_written);
}
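
/*
 * Illustrative example of the and'ing rule above (added, not from the
 * original source): if one slot in a packet writes P0 = 0xff and a later
 * slot in the same packet writes P0 = 0x01, the committed value is
 * 0xff & 0x01 = 0x01.  The first write in the packet is a plain
 * assignment; every subsequent write is and'ed into the result.
 */
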
static inline void gen_read_p3_0(TCGv control_reg)
{
    tcg_gen_movi_tl(control_reg, 0);
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
    }
}

/*
 * Certain control registers require special handling on read
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> concat the 4 predicate registers together
 *     HEX_REG_PC            actual value stored in DisasContext
 *                           -> assign from ctx->base.pc_next
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> add current TB changes to existing reg value
 */
static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
                                     TCGv dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_read_p3_0(dest);
    } else if (reg_num == HEX_REG_PC) {
        tcg_gen_movi_tl(dest, ctx->base.pc_next);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
    } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
    } else {
        tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
    }
}

static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
                                          TCGv_i64 dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv p3_0 = tcg_temp_new();
        gen_read_p3_0(p3_0);
        tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
    } else if (reg_num == HEX_REG_PC - 1) {
        TCGv pc = tcg_constant_tl(ctx->base.pc_next);
        tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        TCGv pkt_cnt = tcg_temp_new();
        TCGv insn_cnt = tcg_temp_new();
        tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
        tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
        tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        TCGv hvx_cnt = tcg_temp_new();
        tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
        tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
    } else {
        tcg_gen_concat_i32_i64(dest,
                               hex_gpr[reg_num],
                               hex_gpr[reg_num + 1]);
    }
}

static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg)
{
    TCGv hex_p8 = tcg_temp_new();
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
        gen_log_pred_write(ctx, i, hex_p8);
    }
}

/*
 * Certain control registers require special handling on write
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> break the value across 4 predicate registers
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> clear the changes
 */
static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
                                      TCGv val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_write_p3_0(ctx, val);
    } else {
        gen_log_reg_write(ctx, reg_num, val);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
        }
        if (reg_num == HEX_REG_QEMU_INSN_CNT) {
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
                                           TCGv_i64 val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv result = get_result_gpr(ctx, reg_num + 1);
        TCGv val32 = tcg_temp_new();
        tcg_gen_extrl_i64_i32(val32, val);
        gen_write_p3_0(ctx, val32);
        tcg_gen_extrh_i64_i32(val32, val);
        tcg_gen_mov_tl(result, val32);
    } else {
        gen_log_reg_write_pair(ctx, reg_num, val);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 8, 8);
    } else {
        tcg_gen_extract_tl(result, src, N * 8, 8);
    }
    return result;
}

TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
{
    TCGv_i64 res64 = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_sextract_i64(res64, src, N * 8, 8);
    } else {
        tcg_gen_extract_i64(res64, src, N * 8, 8);
    }
    tcg_gen_extrl_i64_i32(result, res64);

    return result;
}

TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 16, 16);
    } else {
        tcg_gen_extract_tl(result, src, N * 16, 16);
    }
    return result;
}

void gen_set_half(int N, TCGv result, TCGv src)
{
    tcg_gen_deposit_tl(result, result, src, N * 16, 16);
}

void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
}

void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
}

static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_tl(hex_llsc_val, dest);
}

static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}

static inline void gen_store_conditional4(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_tl(0xff);
    zero = tcg_constant_tl(0);
    tmp = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
                              ctx->mem_idx, MO_32);
    tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
                       one, zero);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}
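
/*
 * Descriptive note (added for clarity, not from the original source):
 * store-conditional is modelled with a compare-and-swap against the value
 * captured by the load-locked helpers above.  If the reservation address
 * does not match, or the cmpxchg reads back something other than
 * hex_llsc_val, pred is set to 0; on success pred is set to 0xff.  The
 * reservation (hex_llsc_addr) is cleared to ~0 in either case.
 */
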
static inline void gen_store_conditional8(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv_i64 src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv_i64 one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_i64(0xff);
    zero = tcg_constant_i64(0);
    tmp = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
                               ctx->mem_idx, MO_64);
    tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
                        one, zero);
    tcg_gen_extrl_i64_i32(pred, tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

#ifndef CONFIG_HEXAGON_IDEF_PARSER
static TCGv gen_slotval(DisasContext *ctx)
{
    int slotval = (ctx->pkt->pkt_has_store_s1 & 1) | (ctx->insn->slot << 1);
    return tcg_constant_tl(slotval);
}
#endif

void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], width);
    tcg_gen_mov_tl(hex_store_val32[slot], src);
}

void gen_store1(TCGv_env tcg_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 1, slot);
}

void gen_store1i(TCGv_env tcg_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store1(tcg_env, vaddr, tmp, slot);
}

void gen_store2(TCGv_env tcg_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 2, slot);
}

void gen_store2i(TCGv_env tcg_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store2(tcg_env, vaddr, tmp, slot);
}

void gen_store4(TCGv_env tcg_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 4, slot);
}

void gen_store4i(TCGv_env tcg_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store4(tcg_env, vaddr, tmp, slot);
}

void gen_store8(TCGv_env tcg_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], 8);
    tcg_gen_mov_i64(hex_store_val64[slot], src);
}

void gen_store8i(TCGv_env tcg_env, TCGv vaddr, int64_t src, uint32_t slot)
{
    TCGv_i64 tmp = tcg_constant_i64(src);
    gen_store8(tcg_env, vaddr, tmp, slot);
}

TCGv gen_8bitsof(TCGv result, TCGv value)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv ones = tcg_constant_tl(0xff);
    tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);

    return result;
}

static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr,
                                  TCGCond cond, TCGv pred)
{
    TCGLabel *pred_false = NULL;
    if (cond != TCG_COND_ALWAYS) {
        pred_false = gen_new_label();
        tcg_gen_brcondi_tl(cond, pred, 0, pred_false);
    }

    if (ctx->pkt->pkt_has_multi_cof) {
        /* If there are multiple branches in a packet, ignore the second one */
        tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC],
                           ctx->branch_taken, tcg_constant_tl(0),
                           hex_gpr[HEX_REG_PC], addr);
        tcg_gen_movi_tl(ctx->branch_taken, 1);
    } else {
        tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr);
    }

    if (cond != TCG_COND_ALWAYS) {
        gen_set_label(pred_false);
    }
}

static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off,
                                   TCGCond cond, TCGv pred)
{
    target_ulong dest = ctx->pkt->pc + pc_off;
    if (ctx->pkt->pkt_has_multi_cof) {
        gen_write_new_pc_addr(ctx, tcg_constant_tl(dest), cond, pred);
    } else {
        /* Defer this jump to the end of the TB */
        ctx->branch_cond = TCG_COND_ALWAYS;
        if (pred != NULL) {
            ctx->branch_cond = cond;
            tcg_gen_mov_tl(ctx->branch_taken, pred);
        }
        ctx->branch_dest = dest;
    }
}

void gen_set_usr_field(DisasContext *ctx, int field, TCGv val)
{
    TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
    tcg_gen_deposit_tl(usr, usr, val,
                       reg_field_info[field].offset,
                       reg_field_info[field].width);
}

void gen_set_usr_fieldi(DisasContext *ctx, int field, int x)
{
    if (reg_field_info[field].width == 1) {
        TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
        target_ulong bit = 1 << reg_field_info[field].offset;
        if ((x & 1) == 1) {
            tcg_gen_ori_tl(usr, usr, bit);
        } else {
            tcg_gen_andi_tl(usr, usr, ~bit);
        }
    } else {
        TCGv val = tcg_constant_tl(x);
        gen_set_usr_field(ctx, field, val);
    }
}

static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2)
{
    TCGv one = tcg_constant_tl(0xff);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero);
}

#ifndef CONFIG_HEXAGON_IDEF_PARSER
static inline void gen_loop0r(DisasContext *ctx, TCGv RsV, int riV)
{
    fIMMEXT(riV);
    fPCALIGN(riV);
    gen_log_reg_write(ctx, HEX_REG_LC0, RsV);
    gen_log_reg_write(ctx, HEX_REG_SA0, tcg_constant_tl(ctx->pkt->pc + riV));
    gen_set_usr_fieldi(ctx, USR_LPCFG, 0);
}

static void gen_loop0i(DisasContext *ctx, int count, int riV)
{
    gen_loop0r(ctx, tcg_constant_tl(count), riV);
}

static inline void gen_loop1r(DisasContext *ctx, TCGv RsV, int riV)
{
    fIMMEXT(riV);
    fPCALIGN(riV);
    gen_log_reg_write(ctx, HEX_REG_LC1, RsV);
    gen_log_reg_write(ctx, HEX_REG_SA1, tcg_constant_tl(ctx->pkt->pc + riV));
}

static void gen_loop1i(DisasContext *ctx, int count, int riV)
{
    gen_loop1r(ctx, tcg_constant_tl(count), riV);
}

static void gen_ploopNsr(DisasContext *ctx, int N, TCGv RsV, int riV)
{
    fIMMEXT(riV);
    fPCALIGN(riV);
    gen_log_reg_write(ctx, HEX_REG_LC0, RsV);
    gen_log_reg_write(ctx, HEX_REG_SA0, tcg_constant_tl(ctx->pkt->pc + riV));
    gen_set_usr_fieldi(ctx, USR_LPCFG, N);
    gen_log_pred_write(ctx, 3, tcg_constant_tl(0));
}

static void gen_ploopNsi(DisasContext *ctx, int N, int count, int riV)
{
    gen_ploopNsr(ctx, N, tcg_constant_tl(count), riV);
}

static inline void gen_comparei(TCGCond cond, TCGv res, TCGv arg1, int arg2)
{
    gen_compare(cond, res, arg1, tcg_constant_tl(arg2));
}
#endif

static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc,
                           TCGCond cond, TCGv pred)
{
    gen_write_new_pc_addr(ctx, dst_pc, cond, pred);
}

static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred)
{
    TCGv LSB = tcg_temp_new();
    tcg_gen_andi_tl(LSB, pred, 1);
    gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB);
}

static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred,
                          int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, cond, pred);
}

static void gen_cmpnd_cmp_jmp(DisasContext *ctx,
                              int pnum, TCGCond cond1, TCGv arg1, TCGv arg2,
                              TCGCond cond2, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        gen_compare(cond1, pred, arg1, arg2);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, ctx->new_pred_value[pnum]);
        gen_cond_jump(ctx, cond2, pred, pc_off);
    }
}

static void gen_cmpnd_cmp_jmp_t(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_EQ, pc_off);
}

static void gen_cmpnd_cmp_jmp_f(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_NE, pc_off);
}

static void gen_cmpnd_cmpi_jmp_t(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_EQ, pc_off);
}

static void gen_cmpnd_cmpi_jmp_f(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_NE, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_t(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_t(ctx, pnum, cond, arg, -1, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_f(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_f(ctx, pnum, cond, arg, -1, pc_off);
}

static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx,
                                  int pnum, TCGv arg, TCGCond cond, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        tcg_gen_andi_tl(pred, arg, 1);
        gen_8bitsof(pred, pred);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, ctx->new_pred_value[pnum]);
        gen_cond_jump(ctx, cond, pred, pc_off);
    }
}

static void gen_testbit0_jumpnv(DisasContext *ctx,
                                TCGv arg, TCGCond cond, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_andi_tl(pred, arg, 1);
    gen_cond_jump(ctx, cond, pred, pc_off);
}

static void gen_jump(DisasContext *ctx, int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

static void gen_jumpr(DisasContext *ctx, TCGv new_pc)
{
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}

static void gen_call(DisasContext *ctx, int pc_off)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

static void gen_callr(DisasContext *ctx, TCGv new_pc)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}

static void gen_cond_call(DisasContext *ctx, TCGv pred,
                          TCGCond cond, int pc_off)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_set_label(skip);
}

static void gen_cond_callr(DisasContext *ctx,
                           TCGCond cond, TCGv pred, TCGv new_pc)
{
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    gen_callr(ctx, new_pc);
    gen_set_label(skip);
}

#ifndef CONFIG_HEXAGON_IDEF_PARSER
/* frame = ((LR << 32) | FP) ^ (FRAMEKEY << 32)) */
static TCGv_i64 gen_frame_scramble(void)
{
    TCGv_i64 frame = tcg_temp_new_i64();
    TCGv tmp = tcg_temp_new();
    tcg_gen_xor_tl(tmp, hex_gpr[HEX_REG_LR], hex_gpr[HEX_REG_FRAMEKEY]);
    tcg_gen_concat_i32_i64(frame, hex_gpr[HEX_REG_FP], tmp);
    return frame;
}
#endif

/* frame ^= (int64_t)FRAMEKEY << 32 */
static void gen_frame_unscramble(TCGv_i64 frame)
{
    TCGv_i64 framekey = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(framekey, hex_gpr[HEX_REG_FRAMEKEY]);
    tcg_gen_shli_i64(framekey, framekey, 32);
    tcg_gen_xor_i64(frame, frame, framekey);
}
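
/*
 * Illustrative note (added, not from the original source): scramble and
 * unscramble are both XORs with (FRAMEKEY << 32), so unscrambling a
 * scrambled frame recovers ((LR << 32) | FP).  For example, with
 * LR = 0x1000, FP = 0x2000 and FRAMEKEY = 0xff, the scrambled frame is
 * 0x000010ff00002000, and unscrambling XORs the upper word with 0xff to
 * give back 0x0000100000002000.
 */
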
static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
{
    Insn *insn = ctx->insn;  /* Needed for CHECK_NOSHUF */
    CHECK_NOSHUF(EA, 8);
    tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ);
}

#ifndef CONFIG_HEXAGON_IDEF_PARSER
/* Stack overflow check */
static void gen_framecheck(TCGv EA, int framesize)
{
    /* Not modelled in linux-user mode */
    /* Placeholder for system mode */
#ifndef CONFIG_USER_ONLY
    g_assert_not_reached();
#endif
}

static void gen_allocframe(DisasContext *ctx, TCGv r29, int framesize)
{
    TCGv r30 = tcg_temp_new();
    TCGv_i64 frame;
    tcg_gen_addi_tl(r30, r29, -8);
    frame = gen_frame_scramble();
    gen_store8(tcg_env, r30, frame, ctx->insn->slot);
    gen_log_reg_write(ctx, HEX_REG_FP, r30);
    gen_framecheck(r30, framesize);
    tcg_gen_subi_tl(r29, r30, framesize);
}

static void gen_deallocframe(DisasContext *ctx, TCGv_i64 r31_30, TCGv r30)
{
    TCGv r29 = tcg_temp_new();
    TCGv_i64 frame = tcg_temp_new_i64();
    gen_load_frame(ctx, frame, r30);
    gen_frame_unscramble(frame);
    tcg_gen_mov_i64(r31_30, frame);
    tcg_gen_addi_tl(r29, r30, 8);
    gen_log_reg_write(ctx, HEX_REG_SP, r29);
}
#endif

static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
{
    /*
     * frame = *src
     * dst = frame_unscramble(frame)
     * SP = src + 8
     * PC = dst.w[1]
     */
    TCGv_i64 frame = tcg_temp_new_i64();
    TCGv r31 = tcg_temp_new();
    TCGv r29 = get_result_gpr(ctx, HEX_REG_SP);

    gen_load_frame(ctx, frame, src);
    gen_frame_unscramble(frame);
    tcg_gen_mov_i64(dst, frame);
    tcg_gen_addi_tl(r29, src, 8);
    tcg_gen_extrh_i64_i32(r31, dst);
    gen_jumpr(ctx, r31);
}

/* if (pred) dst = dealloc_return(src):raw */
static void gen_cond_return(DisasContext *ctx, TCGv_i64 dst, TCGv src,
                            TCGv pred, TCGCond cond)
{
    TCGv LSB = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(LSB, pred, 1);

    tcg_gen_brcondi_tl(cond, LSB, 0, skip);
    gen_return(ctx, dst, src);
    gen_set_label(skip);
}

/* sub-instruction version (no RddV, so handle it manually) */
static void gen_cond_return_subinsn(DisasContext *ctx, TCGCond cond, TCGv pred)
{
    TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP);
    gen_cond_return(ctx, RddV, hex_gpr[HEX_REG_FP], pred, cond);
    gen_log_reg_write_pair(ctx, HEX_REG_FP, RddV);
}

static void gen_endloop0(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     *    if (lpcfg == 1) {
     *        p3 = 0xff;
     *    }
     */
    TCGLabel *label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        gen_log_pred_write(ctx, 3, tcg_constant_tl(0xff));
    }
    gen_set_label(label1);

    /*
     *    if (lpcfg) {
     *        SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     *    }
     */
    TCGLabel *label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        gen_set_usr_field(ctx, USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     * If we're in a tight loop, we'll do this at the end of the TB to take
     * advantage of direct block chaining.
     */
    if (!ctx->is_tight_loop) {
        /*
         *    if (LC0 > 1) {
         *        PC = SA0;
         *        LC0--;
         *    }
         */
        TCGLabel *label3 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
        {
            TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
            gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
            tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
        }
        gen_set_label(label3);
    }
}

static void gen_endloop1(DisasContext *ctx)
{
    /*
     *    if (LC1 > 1) {
     *        PC = SA1;
     *        LC1--;
     *    }
     */
    TCGLabel *label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, label);
    {
        TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
        tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
    }
    gen_set_label(label);
}

static void gen_endloop01(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();
    TCGLabel *label1 = gen_new_label();
    TCGLabel *label2 = gen_new_label();
    TCGLabel *label3 = gen_new_label();
    TCGLabel *done = gen_new_label();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     *    if (lpcfg == 1) {
     *        p3 = 0xff;
     *    }
     */
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        gen_log_pred_write(ctx, 3, tcg_constant_tl(0xff));
    }
    gen_set_label(label1);

    /*
     *    if (lpcfg) {
     *        SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     *    }
     */
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        gen_set_usr_field(ctx, USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     *    if (LC0 > 1) {
     *        PC = SA0;
     *        LC0--;
     *    } else if (LC1 > 1) {
     *        PC = SA1;
     *        LC1--;
     *    }
     */
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
    {
        TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
        tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
        tcg_gen_br(done);
    }
    gen_set_label(label3);
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, done);
    {
        TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
        tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
    }
    gen_set_label(done);
}

static void gen_cmp_jumpnv(DisasContext *ctx,
                           TCGCond cond, TCGv val, TCGv src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcond_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

static void gen_cmpi_jumpnv(DisasContext *ctx,
                            TCGCond cond, TCGv val, int src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcondi_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

/* Shift left with saturation */
static void gen_shl_sat(DisasContext *ctx, TCGv dst, TCGv src, TCGv shift_amt)
{
    TCGv tmp = tcg_temp_new();    /* In case dst == src */
    TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
    TCGv sh32 = tcg_temp_new();
    TCGv dst_sar = tcg_temp_new();
    TCGv ovf = tcg_temp_new();
    TCGv satval = tcg_temp_new();
    TCGv min = tcg_constant_tl(0x80000000);
    TCGv max = tcg_constant_tl(0x7fffffff);

    /*
     * Possible values for shift_amt are 0 .. 64
     * We need special handling for values above 31
     *
     * sh32 = shift & 31;
     * dst = sh32 == shift ? src : 0;
     * dst <<= sh32;
     * dst_sar = dst >> sh32;
     * satval = src < 0 ? min : max;
     * if (dst_asr != src) {
     *     usr.OVF |= 1;
     *     dst = satval;
     * }
     */

    tcg_gen_andi_tl(sh32, shift_amt, 31);
    tcg_gen_movcond_tl(TCG_COND_EQ, tmp, sh32, shift_amt,
                       src, tcg_constant_tl(0));
    tcg_gen_shl_tl(tmp, tmp, sh32);
    tcg_gen_sar_tl(dst_sar, tmp, sh32);
    tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max);

    tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src);
    tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset);
    tcg_gen_or_tl(usr, usr, ovf);

    tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, tmp, satval);
}
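
/*
 * Worked example (illustrative, not from the original source):
 * src = 0x40000000, shift_amt = 2.  Then sh32 = 2,
 * tmp = src << 2 = 0 (the set bit is shifted out of the 32-bit value),
 * and dst_sar = tmp >> 2 = 0 != src, so the result saturates to
 * 0x7fffffff (src is non-negative) and USR.OVF is set.
 */
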
static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt)
{
    /*
     *  Shift arithmetic right
     *  Robust when shift_amt is >31 bits
     */
    TCGv tmp = tcg_temp_new();
    tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31));
    tcg_gen_sar_tl(dst, src, tmp);
}

/* Bidirectional shift right with saturation */
static void gen_asr_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift left */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_shl_sat(ctx, RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift right */
    gen_sar(RdV, RsV, shift_amt);

    gen_set_label(done);
}

/* Bidirectional shift left with saturation */
static void gen_asl_r_r_sat(DisasContext *ctx, TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift right */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_sar(RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift left */
    gen_shl_sat(ctx, RdV, RsV, shift_amt);

    gen_set_label(done);
}

static void gen_insert_rp(DisasContext *ctx, TCGv RxV, TCGv RsV, TCGv_i64 RttV)
{
    /*
     * int width = fZXTN(6, 32, (fGETWORD(1, RttV)));
     * int offset = fSXTN(7, 32, (fGETWORD(0, RttV)));
     * size8u_t mask = ((fCONSTLL(1) << width) - 1);
     * if (offset < 0) {
     *     RxV = 0;
     * } else {
     *     RxV &= ~(mask << offset);
     *     RxV |= ((RsV & mask) << offset);
     * }
     */

    TCGv width = tcg_temp_new();
    TCGv offset = tcg_temp_new();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 result = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 offset64 = tcg_temp_new_i64();
    TCGLabel *label = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_extrh_i64_i32(width, RttV);
    tcg_gen_extract_tl(width, width, 0, 6);
    tcg_gen_extrl_i64_i32(offset, RttV);
    tcg_gen_sextract_tl(offset, offset, 0, 7);
    /* Possible values for offset are -64 .. 63 */
    tcg_gen_brcondi_tl(TCG_COND_GE, offset, 0, label);
    /* For negative offsets, zero out the result */
    tcg_gen_movi_tl(RxV, 0);
    tcg_gen_br(done);
    gen_set_label(label);
    /* At this point, possible values of offset are 0 .. 63 */
    tcg_gen_ext_i32_i64(mask, width);
    tcg_gen_shl_i64(mask, tcg_constant_i64(1), mask);
    tcg_gen_subi_i64(mask, mask, 1);
    tcg_gen_extu_i32_i64(result, RxV);
    tcg_gen_ext_i32_i64(tmp, offset);
    tcg_gen_shl_i64(tmp, mask, tmp);
    tcg_gen_andc_i64(result, result, tmp);
    tcg_gen_extu_i32_i64(tmp, RsV);
    tcg_gen_and_i64(tmp, tmp, mask);
    tcg_gen_extu_i32_i64(offset64, offset);
    tcg_gen_shl_i64(tmp, tmp, offset64);
    tcg_gen_or_i64(result, result, tmp);
    tcg_gen_extrl_i64_i32(RxV, result);
    gen_set_label(done);
}
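
/*
 * Worked example (illustrative, not from the original source): with
 * width = 4 and offset = 8 decoded from RttV, mask = 0xf, so
 * RxV = 0x12345678 and RsV = 0xf become
 *     (0x12345678 & ~0xf00) | ((0xf & 0xf) << 8) = 0x12345f78
 */
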
static void gen_asr_r_svw_trun(DisasContext *ctx, TCGv RdV,
                               TCGv_i64 RssV, TCGv RtV)
{
    /*
     * for (int i = 0; i < 2; i++) {
     *     fSETHALF(i, RdV, fGETHALF(0, ((fSXTN(7, 32, RtV) > 0) ?
     *         (fCAST4_8s(fGETWORD(i, RssV)) >> fSXTN(7, 32, RtV)) :
     *         (fCAST4_8s(fGETWORD(i, RssV)) << -fSXTN(7, 32, RtV)))));
     * }
     */
    TCGv shift_amt32 = tcg_temp_new();
    TCGv_i64 shift_amt64 = tcg_temp_new_i64();
    TCGv_i64 tmp64 = tcg_temp_new_i64();
    TCGv tmp32 = tcg_temp_new();
    TCGLabel *label = gen_new_label();
    TCGLabel *zero = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_tl(shift_amt32, RtV, 0, 7);
    /* Possible values of shift_amt32 are -64 .. 63 */
    tcg_gen_brcondi_tl(TCG_COND_LE, shift_amt32, 0, label);
    /* After branch, possible values of shift_amt32 are 1 .. 63 */
    tcg_gen_ext_i32_i64(shift_amt64, shift_amt32);
    for (int i = 0; i < 2; i++) {
        tcg_gen_sextract_i64(tmp64, RssV, i * 32, 32);
        tcg_gen_sar_i64(tmp64, tmp64, shift_amt64);
        tcg_gen_extrl_i64_i32(tmp32, tmp64);
        tcg_gen_deposit_tl(RdV, RdV, tmp32, i * 16, 16);
    }
    tcg_gen_br(done);
    gen_set_label(label);
    tcg_gen_neg_tl(shift_amt32, shift_amt32);
    /* At this point, possible values of shift_amt32 are 0 .. 64 */
    tcg_gen_brcondi_tl(TCG_COND_GT, shift_amt32, 63, zero);
    /* At this point, possible values of shift_amt32 are 0 .. 63 */
    tcg_gen_ext_i32_i64(shift_amt64, shift_amt32);
    for (int i = 0; i < 2; i++) {
        tcg_gen_sextract_i64(tmp64, RssV, i * 32, 32);
        tcg_gen_shl_i64(tmp64, tmp64, shift_amt64);
        tcg_gen_extrl_i64_i32(tmp32, tmp64);
        tcg_gen_deposit_tl(RdV, RdV, tmp32, i * 16, 16);
    }
    tcg_gen_br(done);
    gen_set_label(zero);
    /* When the shift_amt is 64, zero out the result */
    tcg_gen_movi_tl(RdV, 0);
    gen_set_label(done);
}

static intptr_t vreg_src_off(DisasContext *ctx, int num)
{
    intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);

    if (test_bit(num, ctx->vregs_select)) {
        offset = ctx_future_vreg_off(ctx, num, 1, false);
    }
    if (test_bit(num, ctx->vregs_updated_tmp)) {
        offset = ctx_tmp_vreg_off(ctx, num, 1, false);
    }
    return offset;
}

static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
                               VRegWriteType type)
{
    intptr_t dstoff;

    if (type != EXT_TMP) {
        dstoff = ctx_future_vreg_off(ctx, num, 1, true);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    } else {
        dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    }
}

static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
                                    VRegWriteType type)
{
    gen_log_vreg_write(ctx, srcoff, num ^ 0, type);
    srcoff += sizeof(MMVector);
    gen_log_vreg_write(ctx, srcoff, num ^ 1, type);
}

static intptr_t get_result_qreg(DisasContext *ctx, int qnum)
{
    if (ctx->need_commit) {
        return offsetof(CPUHexagonState, future_QRegs[qnum]);
    } else {
        return offsetof(CPUHexagonState, QRegs[qnum]);
    }
}

static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
                          bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, tcg_env, dstoff + i * 8);
    }
}

static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                           int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    if (is_gather_store_insn(ctx)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(tcg_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}

static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}

static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, tcg_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, tcg_env, dstoff + i);
    }
}
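
/*
 * Illustrative note (added, not from the original source): for size = 1
 * (byte elements) each nonzero byte of the source vector sets one bit of
 * the destination QReg; for size = 4 (word elements) each nonzero word
 * sets 4 consecutive mask bits.
 */
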
void probe_noshuf_load(TCGv va, int s, int mi)
{
    TCGv size = tcg_constant_tl(s);
    TCGv mem_idx = tcg_constant_tl(mi);
    gen_helper_probe_noshuf_load(tcg_env, va, size, mem_idx);
}

/*
 * Note: Since this function might branch, `val` is
 * required to be a `tcg_temp_local`.
 */
void gen_set_usr_field_if(DisasContext *ctx, int field, TCGv val)
{
    /* Sets the USR field if `val` is non-zero */
    if (reg_field_info[field].width == 1) {
        TCGv usr = get_result_gpr(ctx, HEX_REG_USR);
        TCGv tmp = tcg_temp_new();
        tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width);
        tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset);
        tcg_gen_or_tl(usr, usr, tmp);
    } else {
        TCGLabel *skip_label = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label);
        gen_set_usr_field(ctx, field, val);
        gen_set_label(skip_label);
    }
}

void gen_sat_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << (width - 1)) - 1);
    TCGv min_val = tcg_constant_tl(-(1 << (width - 1)));
    tcg_gen_smin_tl(dest, source, max_val);
    tcg_gen_smax_tl(dest, dest, min_val);
}

void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    TCGv tmp = tcg_temp_new();    /* In case dest == source */
    gen_sat_i32(tmp, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, tmp);
    tcg_gen_mov_tl(dest, tmp);
}

void gen_satu_i32(TCGv dest, TCGv source, int width)
{
    TCGv tmp = tcg_temp_new();    /* In case dest == source */
    TCGv max_val = tcg_constant_tl((1 << width) - 1);
    TCGv zero = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_GTU, tmp, source, max_val, max_val, source);
    tcg_gen_movcond_tl(TCG_COND_LT, tmp, source, zero, zero, tmp);
    tcg_gen_mov_tl(dest, tmp);
}

void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    TCGv tmp = tcg_temp_new();    /* In case dest == source */
    gen_satu_i32(tmp, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, tmp);
    tcg_gen_mov_tl(dest, tmp);
}

void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << (width - 1)) - 1LL);
    TCGv_i64 min_val = tcg_constant_i64(-(1LL << (width - 1)));
    tcg_gen_smin_i64(dest, source, max_val);
    tcg_gen_smax_i64(dest, dest, min_val);
}

void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 tmp = tcg_temp_new_i64(); /* In case dest == source */
    TCGv_i64 ovfl_64;
    gen_sat_i64(tmp, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, tmp, source);
    tcg_gen_mov_i64(dest, tmp);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 tmp = tcg_temp_new_i64();    /* In case dest == source */
    TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GTU, tmp, source, max_val, max_val, source);
    tcg_gen_movcond_i64(TCG_COND_LT, tmp, source, zero, zero, tmp);
    tcg_gen_mov_i64(dest, tmp);
}

void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 tmp = tcg_temp_new_i64();    /* In case dest == source */
    TCGv_i64 ovfl_64;
    gen_satu_i64(tmp, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, tmp, source);
    tcg_gen_mov_i64(dest, tmp);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}
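
/*
 * Illustrative examples (added, not from the original source):
 * gen_sat_i32 with width = 8 clamps to [-128, 127], so 200 saturates to
 * 127 and -300 to -128; gen_satu_i32 with width = 8 clamps to [0, 255],
 * so -1 saturates to 0 and 300 to 255.  The *_ovfl variants additionally
 * set ovfl to 1 whenever clamping changed the value.
 */
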
/* Implements the fADDSAT64 macro in TCG */
void gen_add_sat_i64(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 sum = tcg_temp_new_i64();
    TCGv_i64 xor = tcg_temp_new_i64();
    TCGv_i64 cond1 = tcg_temp_new_i64();
    TCGv_i64 cond2 = tcg_temp_new_i64();
    TCGv_i64 cond3 = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
    TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);
    TCGv_i64 max_neg = tcg_constant_i64(0x8000000000000000LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGLabel *no_ovfl_label = gen_new_label();
    TCGLabel *ovfl_label = gen_new_label();
    TCGLabel *ret_label = gen_new_label();

    tcg_gen_add_i64(sum, a, b);
    tcg_gen_xor_i64(xor, a, b);

    /* if (xor & mask) */
    tcg_gen_and_i64(cond1, xor, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label);

    /* else if ((a ^ sum) & mask) */
    tcg_gen_xor_i64(cond2, a, sum);
    tcg_gen_and_i64(cond2, cond2, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label);
    /* fallthrough to no_ovfl_label branch */

    /* if branch */
    gen_set_label(no_ovfl_label);
    tcg_gen_mov_i64(ret, sum);
    tcg_gen_br(ret_label);

    /* else if branch */
    gen_set_label(ovfl_label);
    tcg_gen_and_i64(cond3, sum, mask);
    tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg);
    gen_set_usr_fieldi(ctx, USR_OVF, 1);

    gen_set_label(ret_label);
}
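
/*
 * Illustrative note on the overflow test above (added, not from the
 * original source): signed overflow is only possible when the operands
 * have the same sign ((a ^ b) & mask == 0) and the sum's sign differs
 * from theirs ((a ^ sum) & mask != 0).  For example,
 * 0x7fffffffffffffff + 1 saturates to 0x7fffffffffffffff and sets
 * USR.OVF, while -1 + 1 = 0 is returned unchanged.
 */
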
#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"