/*
 * Helpers for HPPA instructions.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "qemu/timer.h"
#include "trace.h"

G_NORETURN void HELPER(excp)(CPUHPPAState *env, int excp)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

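/*
 * As above, but restore the guest state implied by the host return
 * address RA before delivering EXCP, for use from other helpers.
 */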
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_loop_exit_restore(cs, ra);
}

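/*
 * Atomically store the bytes of VAL selected by MASK into the
 * naturally aligned word containing ADDR.  This is done with a
 * compare-and-swap loop on the whole host word; if no host address
 * is available (e.g. for an MMIO page), exit and restart the
 * instruction in the serial, exclusive context.
 */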
static void atomic_store_mask32(CPUHPPAState *env, target_ulong addr,
                                uint32_t val, uint32_t mask, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uint32_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, 3, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint32_t *)((uintptr_t)vaddr & -4);
    mask = addr & 1 ? 0x00ffffffu : 0xffffff00u;

    old = *haddr;
    while (1) {
        new = be32_to_cpu((cpu_to_be32(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
}

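/* As above, for a 64-bit word; requires a 64-bit host cmpxchg.  */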
static void atomic_store_mask64(CPUHPPAState *env, target_ulong addr,
                                uint64_t val, uint64_t mask,
                                int size, uintptr_t ra)
{
#ifdef CONFIG_ATOMIC64
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uint64_t old, new, cmp, *haddr;
    void *vaddr;

    vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
    if (vaddr == NULL) {
        cpu_loop_exit_atomic(env_cpu(env), ra);
    }
    haddr = (uint64_t *)((uintptr_t)vaddr & -8);

    old = *haddr;
    while (1) {
        new = be64_to_cpu((cpu_to_be64(old) & ~mask) | (val & mask));
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
        if (cmp == old) {
            return;
        }
        old = cmp;
    }
#else
    cpu_loop_exit_atomic(env_cpu(env), ra);
#endif
}

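/*
 * Store Bytes, "begin" case: store the low-order bytes of VAL from
 * the addressed byte through the end of the aligned word.  E.g. for
 * addr & 3 == 1, the three least-significant bytes of VAL land at
 * addr, addr + 1 and addr + 2; in a parallel context those bytes
 * must appear to other cpus as a single store.
 */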
static void do_stby_b(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 1:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    default:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    }
}

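/* As above, for the doubleword form: up to 8 bytes, keyed on addr & 7.  */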
static void do_stdby_b(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        cpu_stb_data_ra(env, addr, val, ra);
        break;
    case 6:
        cpu_stw_data_ra(env, addr, val, ra);
        break;
    case 5:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr, val, 0x00ffffffu, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 16, ra);
            cpu_stw_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr, val, ra);
        break;
    case 3:
        /* The 5 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x000000ffffffffffull, 5, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 1, val, ra);
        }
        break;
    case 2:
        /* The 6 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x0000ffffffffffffull, 6, ra);
        } else {
            cpu_stw_data_ra(env, addr, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 2, val, ra);
        }
        break;
    case 1:
        /* The 7 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr, val, 0x00ffffffffffffffull, 7, ra);
        } else {
            cpu_stb_data_ra(env, addr, val >> 48, ra);
            cpu_stw_data_ra(env, addr + 1, val >> 32, ra);
            cpu_stl_data_ra(env, addr + 3, val, ra);
        }
        break;
    default:
        cpu_stq_data_ra(env, addr, val, ra);
        break;
    }
}

void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_b(env, addr, val, false, GETPC());
}

void HELPER(stby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_b(env, addr, val, true, GETPC());
}

void HELPER(stdby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_b(env, addr, val, false, GETPC());
}

void HELPER(stdby_b_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_b(env, addr, val, true, GETPC());
}

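/*
 * Store Bytes, "end" case: ADDR points one byte past the last byte
 * to be stored, and the high-order bytes of VAL are stored at the
 * start of the aligned word.  A word-aligned ADDR stores nothing,
 * but still probes for write permission.
 */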
static void do_stby_e(CPUHPPAState *env, target_ulong addr, target_ulong val,
                      bool parallel, uintptr_t ra)
{
    switch (addr & 3) {
    case 3:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty.  */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}

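/* As above, for the doubleword form.  */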
static void do_stdby_e(CPUHPPAState *env, target_ulong addr, uint64_t val,
                       bool parallel, uintptr_t ra)
{
    switch (addr & 7) {
    case 7:
        /* The 7 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 7, val,
                                0xffffffffffffff00ull, 7, ra);
        } else {
            cpu_stl_data_ra(env, addr - 7, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
        }
        break;
    case 6:
        /* The 6 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 6, val,
                                0xffffffffffff0000ull, 6, ra);
        } else {
            cpu_stl_data_ra(env, addr - 6, val >> 32, ra);
            cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
        }
        break;
    case 5:
        /* The 5 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask64(env, addr - 5, val,
                                0xffffffffff000000ull, 5, ra);
        } else {
            cpu_stl_data_ra(env, addr - 5, val >> 32, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
        }
        break;
    case 4:
        cpu_stl_data_ra(env, addr - 4, val >> 32, ra);
        break;
    case 3:
        /* The 3 byte store must appear atomic.  */
        if (parallel) {
            atomic_store_mask32(env, addr - 3, val >> 32, 0xffffff00u, ra);
        } else {
            cpu_stw_data_ra(env, addr - 3, val >> 48, ra);
            cpu_stb_data_ra(env, addr - 1, val >> 40, ra);
        }
        break;
    case 2:
        cpu_stw_data_ra(env, addr - 2, val >> 48, ra);
        break;
    case 1:
        cpu_stb_data_ra(env, addr - 1, val >> 56, ra);
        break;
    default:
        /* Nothing is stored, but protection is checked and the
           cacheline is marked dirty.  */
        probe_write(env, addr, 0, cpu_mmu_index(env_cpu(env), 0), ra);
        break;
    }
}

void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stby_e(env, addr, val, false, GETPC());
}

void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                             target_ulong val)
{
    do_stby_e(env, addr, val, true, GETPC());
}

void HELPER(stdby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
{
    do_stdby_e(env, addr, val, false, GETPC());
}

void HELPER(stdby_e_parallel)(CPUHPPAState *env, target_ulong addr,
                              target_ulong val)
{
    do_stdby_e(env, addr, val, true, GETPC());
}

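/*
 * LDCW requires a 16-byte aligned address; what an unaligned access
 * does is architecturally undefined, so merely log it.
 */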
void HELPER(ldc_check)(target_ulong addr)
{
    if (unlikely(addr & 0xf)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Undefined ldc to unaligned address mod 16: "
                      TARGET_FMT_lx "\n", addr);
    }
}

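/*
 * Implement the PROBE instruction: test whether an access of type
 * WANT at privilege LEVEL would succeed for ADDR, returning 0 or 1
 * instead of faulting on a protection violation.  A TLB miss is
 * still delivered, as a non-access fault.
 */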
target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr,
                           uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
    return page_check_range(addr, 1, want);
#else
    int prot, excp, mmu_idx;
    hwaddr phys;

    trace_hppa_tlb_probe(addr, level, want);
    /* Fail if the requested privilege level is higher than current.  */
    if (level < (env->iaoq_f & 3)) {
        return 0;
    }

    mmu_idx = PRIV_P_TO_MMU_IDX(level, env->psw & PSW_P);
    excp = hppa_get_physical_address(env, addr, mmu_idx, 0, 0, &phys, &prot);
    if (excp >= 0) {
        cpu_restore_state(env_cpu(env), GETPC());
        hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        helper_excp(env, excp);
    }
    return (want & prot) != 0;
#endif
}

target_ulong HELPER(read_interval_timer)(void)
{
#ifdef CONFIG_USER_ONLY
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks.  */
    return cpu_get_host_ticks();
#else
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the cr16,
       present it with a well-timed clock fixed at 250MHz.  */
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2;
#endif
}

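/*
 * Helpers for the PA-RISC multimedia (MAX) halfword instructions:
 * each 64-bit register holds four independent 16-bit lanes.  The
 * "ss" forms saturate each signed result to [INT16_MIN, INT16_MAX];
 * the "us" forms saturate to [0, UINT16_MAX].  For example, adding
 * lanes 0x7fff + 0x0001 with hadd_ss yields 0x7fff, not 0x8000.
 */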
uint64_t HELPER(hadd_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hadd_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 + f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

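/* Halfword average: the bit shifted out is OR'd back into the LSB,
   i.e. inexact results are rounded to odd.  */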
uint64_t HELPER(havg)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = extract64(r2, i, 16);
        int fr = f1 + f2;

        ret = deposit64(ret, i, 16, (fr >> 1) | (fr & 1));
    }
    return ret;
}

uint64_t HELPER(hsub_ss)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hsub_us)(uint64_t r1, uint64_t r2)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = extract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = f1 - f2;

        fr = MIN(fr, UINT16_MAX);
        fr = MAX(fr, 0);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

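/* Halfword shift-and-add: each lane of R1 is shifted left (hshladd)
   or right (hshradd) by SH bits before a signed saturating add with
   the corresponding lane of R2.  */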
uint64_t HELPER(hshladd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 << sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}

uint64_t HELPER(hshradd)(uint64_t r1, uint64_t r2, uint32_t sh)
{
    uint64_t ret = 0;

    for (int i = 0; i < 64; i += 16) {
        int f1 = sextract64(r1, i, 16);
        int f2 = sextract64(r2, i, 16);
        int fr = (f1 >> sh) + f2;

        fr = MIN(fr, INT16_MAX);
        fr = MAX(fr, INT16_MIN);
        ret = deposit64(ret, i, 16, fr);
    }
    return ret;
}