/*
 * Copyright (c) 2007 CodeSourcery
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "semihosting/semihost.h"

#if defined(CONFIG_USER_ONLY)

void m68k_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
}

#else

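/*
 * ColdFire RTE: the exception frame is two longwords.  The first holds
 * the frame format in its top nibble (with the original stack
 * misalignment recorded in its low two bits) and the saved SR in its
 * low 16 bits; the second holds the return PC.
 */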
static void cf_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint32_t fmt;

    sp = env->aregs[7];
    fmt = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    env->pc = cpu_ldl_mmuidx_ra(env, sp + 4, MMU_KERNEL_IDX, 0);
    sp |= (fmt >> 28) & 3;
    env->aregs[7] = sp + 8;

    cpu_m68k_set_sr(env, fmt);
}

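/*
 * 680x0 RTE: pop the SR word and the return PC, then, on everything
 * newer than the 68000, the format/vector word that says how much
 * extra frame to discard.  A format-1 (throwaway) frame restores SR
 * and keeps unwinding.
 */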
static void m68k_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint16_t fmt;
    uint32_t sr;

    sp = env->aregs[7];
throwaway:
    sr = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    sp += 2;
    env->pc = cpu_ldl_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
    sp += 4;
    if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
        /* all except 68000 */
        fmt = cpu_lduw_mmuidx_ra(env, sp, MMU_KERNEL_IDX, 0);
        sp += 2;
        switch (fmt >> 12) {
        case 0:
            break;
        case 1:
            env->aregs[7] = sp;
            cpu_m68k_set_sr(env, sr);
            goto throwaway;
        case 2:
        case 3:
            sp += 4;
            break;
        default:
            cpu_abort(env_cpu(env), "Unsupported frame type %d\n",
                      fmt >> 12);
        }
    }
    env->aregs[7] = sp;
    cpu_m68k_set_sr(env, sr);
}

static const char *m68k_exception_name(int index)
{
    switch (index) {
    case EXCP_ACCESS:
        return "Access Fault";
    case EXCP_ADDRESS:
        return "Address Error";
    case EXCP_ILLEGAL:
        return "Illegal Instruction";
    case EXCP_DIV0:
        return "Divide by Zero";
    case EXCP_TRAPCC:
        return "FTRAPcc, TRAPcc, TRAPV";
    case EXCP_PRIVILEGE:
        return "Privilege Violation";
    case EXCP_DEBEGBP: /* 68020/030 only */
        return "Copro Protocol Violation";
    case EXCP_FORMAT:
        return "Format Error";
    case EXCP_UNINITIALIZED:
        return "Uninitialized Interrupt";
    case EXCP_SPURIOUS:
        return "Spurious Interrupt";
    case EXCP_INT_LEVEL_1:
        return "Level 1 Interrupt";
    case EXCP_INT_LEVEL_1 + 1:
        return "Level 2 Interrupt";
    case EXCP_INT_LEVEL_1 + 2:
        return "Level 3 Interrupt";
    case EXCP_INT_LEVEL_1 + 3:
        return "Level 4 Interrupt";
    case EXCP_INT_LEVEL_1 + 4:
        return "Level 5 Interrupt";
    case EXCP_INT_LEVEL_1 + 5:
        return "Level 6 Interrupt";
    case EXCP_INT_LEVEL_1 + 6:
        return "Level 7 Interrupt";
    case EXCP_TRAP0 + 10:
        return "TRAP #10";
    case EXCP_TRAP0 + 11:
        return "TRAP #11";
    case EXCP_TRAP0 + 12:
        return "TRAP #12";
    case EXCP_TRAP0 + 13:
        return "TRAP #13";
    case EXCP_TRAP0 + 14:
        return "TRAP #14";
    case EXCP_TRAP0 + 15:
        return "TRAP #15";
    case EXCP_FP_BSUN:
        return "FP Branch/Set on unordered condition";
    case EXCP_FP_INEX:
        return "FP Inexact Result";
    case EXCP_FP_DZ:
        return "FP Divide by Zero";
    case EXCP_FP_UNFL:
        return "FP Underflow";
    case EXCP_FP_OPERR:
        return "FP Operand Error";
    case EXCP_FP_OVFL:
        return "FP Overflow";
    case EXCP_FP_SNAN:
        return "FP Signaling NAN";
    case EXCP_FP_UNIMP:
        return "FP Unimplemented Data Type";
    case EXCP_MMU_CONF: /* 68030/68851 only */
        return "MMU Configuration Error";
    case EXCP_MMU_ILLEGAL: /* 68851 only */
        return "MMU Illegal Operation";
    case EXCP_MMU_ACCESS: /* 68851 only */
        return "MMU Access Level Violation";
    case 64 ... 255:
        return "User Defined Vector";
    }
    return "Unassigned";
}

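/*
 * ColdFire exception entry: push an 8-byte frame (the format/vector/SR
 * longword at the lower address, the return PC above it) and vector
 * through VBR.  EXCP_HALT_INSN is diverted to the semihosting handler
 * when the magic instruction pair (a nop at pc - 4 followed by the
 * 0x4e7bf000 encoding at pc) is found in supervisor mode.
 */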
static void cf_interrupt_all(CPUM68KState *env, int is_hw)
{
    CPUState *cs = env_cpu(env);
    uint32_t sp;
    uint32_t sr;
    uint32_t fmt;
    uint32_t retaddr;
    uint32_t vector;

    fmt = 0;
    retaddr = env->pc;

    if (!is_hw) {
        switch (cs->exception_index) {
        case EXCP_RTE:
            /* Return from an exception. */
            cf_rte(env);
            return;
        case EXCP_HALT_INSN:
            if (semihosting_enabled()
                && (env->sr & SR_S) != 0
                && (env->pc & 3) == 0
                && cpu_lduw_code(env, env->pc - 4) == 0x4e71
                && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
                env->pc += 4;
                do_m68k_semihosting(env, env->dregs[0]);
                return;
            }
            cs->halted = 1;
            cs->exception_index = EXCP_HLT;
            cpu_loop_exit(cs);
            return;
        }
        if (cs->exception_index >= EXCP_TRAP0
            && cs->exception_index <= EXCP_TRAP15) {
            /* Move the PC after the trap instruction. */
            retaddr += 2;
        }
    }

    vector = cs->exception_index << 2;

    sr = env->sr | cpu_m68k_get_ccr(env);
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
                 ++count, m68k_exception_name(cs->exception_index),
                 vector, env->pc, env->aregs[7], sr);
    }

    fmt |= 0x40000000;
    fmt |= vector << 16;
    fmt |= sr;

    env->sr |= SR_S;
    if (is_hw) {
        env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
        env->sr &= ~SR_M;
    }
    m68k_switch_sp(env);
    sp = env->aregs[7];
    fmt |= (sp & 3) << 28;

    /* ??? This could cause MMU faults. */
    sp &= ~3;
    sp -= 4;
    cpu_stl_mmuidx_ra(env, sp, retaddr, MMU_KERNEL_IDX, 0);
    sp -= 4;
    cpu_stl_mmuidx_ra(env, sp, fmt, MMU_KERNEL_IDX, 0);
    env->aregs[7] = sp;
    /* Jump to vector. */
    env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
}

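/*
 * Push a 680x0 exception stack frame.  Every frame ends with the return
 * PC and the SR word; CPUs other than the 68000 also push a
 * format/vector word, and formats 2, 3 and 4 additionally stack a
 * fault/instruction address (format 4 also stacks the current PC).
 */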
static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
                                  uint16_t format, uint16_t sr,
                                  uint32_t addr, uint32_t retaddr)
{
    if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
        /* all except 68000 */
        CPUState *cs = env_cpu(env);
        switch (format) {
        case 4:
            *sp -= 4;
            cpu_stl_mmuidx_ra(env, *sp, env->pc, MMU_KERNEL_IDX, 0);
            *sp -= 4;
            cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
            break;
        case 3:
        case 2:
            *sp -= 4;
            cpu_stl_mmuidx_ra(env, *sp, addr, MMU_KERNEL_IDX, 0);
            break;
        }
        *sp -= 2;
        cpu_stw_mmuidx_ra(env, *sp, (format << 12) + (cs->exception_index << 2),
                          MMU_KERNEL_IDX, 0);
    }
    *sp -= 4;
    cpu_stl_mmuidx_ra(env, *sp, retaddr, MMU_KERNEL_IDX, 0);
    *sp -= 2;
    cpu_stw_mmuidx_ra(env, *sp, sr, MMU_KERNEL_IDX, 0);
}

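/*
 * 680x0 exception entry, modelled on MC68040UM/AD section 9.3.10.
 * Access faults build the long 68040 format-7 frame (the write-back
 * and push-data slots are stacked as zeroes since the emulation never
 * leaves a memory access half done); interrupts taken while SR_M is
 * set push a format-0 frame on the master stack and a format-1
 * (throwaway) frame on the interrupt stack.
 */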
static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
{
    CPUState *cs = env_cpu(env);
    uint32_t sp;
    uint32_t retaddr;
    uint32_t vector;
    uint16_t sr, oldsr;

    retaddr = env->pc;

    if (!is_hw) {
        switch (cs->exception_index) {
        case EXCP_RTE:
            /* Return from an exception. */
            m68k_rte(env);
            return;
        case EXCP_TRAP0 ... EXCP_TRAP15:
            /* Move the PC after the trap instruction. */
            retaddr += 2;
            break;
        }
    }

    vector = cs->exception_index << 2;

    sr = env->sr | cpu_m68k_get_ccr(env);
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
                 ++count, m68k_exception_name(cs->exception_index),
                 vector, env->pc, env->aregs[7], sr);
    }

    /*
     * MC68040UM/AD, chapter 9.3.10
     */

    /* "the processor first make an internal copy" */
    oldsr = sr;
    /* "set the mode to supervisor" */
    sr |= SR_S;
    /* "suppress tracing" */
    sr &= ~SR_T;
    /* "sets the processor interrupt mask" */
    if (is_hw) {
        sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
    }
    cpu_m68k_set_sr(env, sr);
    sp = env->aregs[7];

    if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) {
        sp &= ~1;
    }

    if (cs->exception_index == EXCP_ACCESS) {
        if (env->mmu.fault) {
            cpu_abort(cs, "DOUBLE MMU FAULT\n");
        }
        env->mmu.fault = true;
        /* push data 3 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* push data 2 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* push data 1 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 1 / push data 0 */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 1 address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 2 data */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 2 address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 3 data */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 3 address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
        /* fault address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);
        /* write back 1 status */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 2 status */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* write back 3 status */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, 0, MMU_KERNEL_IDX, 0);
        /* special status word */
        sp -= 2;
        cpu_stw_mmuidx_ra(env, sp, env->mmu.ssw, MMU_KERNEL_IDX, 0);
        /* effective address */
        sp -= 4;
        cpu_stl_mmuidx_ra(env, sp, env->mmu.ar, MMU_KERNEL_IDX, 0);

        do_stack_frame(env, &sp, 7, oldsr, 0, retaddr);
        env->mmu.fault = false;
        if (qemu_loglevel_mask(CPU_LOG_INT)) {
            qemu_log("            "
                     "ssw: %08x ea: %08x sfc: %d dfc: %d\n",
                     env->mmu.ssw, env->mmu.ar, env->sfc, env->dfc);
        }
    } else if (cs->exception_index == EXCP_ADDRESS) {
        do_stack_frame(env, &sp, 2, oldsr, 0, retaddr);
    } else if (cs->exception_index == EXCP_ILLEGAL ||
               cs->exception_index == EXCP_DIV0 ||
               cs->exception_index == EXCP_CHK ||
               cs->exception_index == EXCP_TRAPCC ||
               cs->exception_index == EXCP_TRACE) {
        /* FIXME: addr is not only env->pc */
        do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr);
    } else if (is_hw && oldsr & SR_M &&
               cs->exception_index >= EXCP_SPURIOUS &&
               cs->exception_index <= EXCP_INT_LEVEL_7) {
        do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
        oldsr = sr;
        env->aregs[7] = sp;
        cpu_m68k_set_sr(env, sr &= ~SR_M);
        sp = env->aregs[7] & ~1;
        do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
    } else {
        do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
    }

    env->aregs[7] = sp;
    /* Jump to vector. */
    env->pc = cpu_ldl_mmuidx_ra(env, env->vbr + vector, MMU_KERNEL_IDX, 0);
}

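/*
 * M68K_FEATURE_M68000 here distinguishes the classic 680x0 family from
 * ColdFire: 680x0 cores take m68k_interrupt_all(), everything else
 * takes cf_interrupt_all().
 */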
static void do_interrupt_all(CPUM68KState *env, int is_hw)
{
    if (m68k_feature(env, M68K_FEATURE_M68000)) {
        m68k_interrupt_all(env, is_hw);
        return;
    }
    cf_interrupt_all(env, is_hw);
}

void m68k_cpu_do_interrupt(CPUState *cs)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    do_interrupt_all(env, 0);
}

static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
    do_interrupt_all(env, 1);
}

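/*
 * Physical bus errors land here.  On 68040-class cores the special
 * status word in env->mmu is filled in (transfer modifier, access
 * size, read/write, and the ATC bit for anything other than a decode
 * error) before EXCP_ACCESS is raised, so the format-7 frame pushed by
 * m68k_interrupt_all() describes the fault.
 */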
void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                 unsigned size, MMUAccessType access_type,
                                 int mmu_idx, MemTxAttrs attrs,
                                 MemTxResult response, uintptr_t retaddr)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    cpu_restore_state(cs, retaddr, true);

    if (m68k_feature(env, M68K_FEATURE_M68040)) {
        /*
         * According to the MC68040 users manual the ATC bit of the SSW is
         * used to distinguish between ATC faults and physical bus errors.
         * In the case of a bus error e.g. during nubus read from an empty
         * slot this bit should not be set
         */
        if (response != MEMTX_DECODE_ERROR) {
            env->mmu.ssw |= M68K_ATC_040;
        }
        env->mmu.ar = addr;

        /* FIXME: manage MMU table access error */
        env->mmu.ssw &= ~M68K_TM_040;
        if (env->sr & SR_S) { /* SUPERVISOR */
            env->mmu.ssw |= M68K_TM_040_SUPER;
        }
        if (access_type == MMU_INST_FETCH) { /* instruction or data */
            env->mmu.ssw |= M68K_TM_040_CODE;
        } else {
            env->mmu.ssw |= M68K_TM_040_DATA;
        }
        env->mmu.ssw &= ~M68K_BA_SIZE_MASK;
        switch (size) {
        case 1:
            env->mmu.ssw |= M68K_BA_SIZE_BYTE;
            break;
        case 2:
            env->mmu.ssw |= M68K_BA_SIZE_WORD;
            break;
        default:
            env->mmu.ssw |= M68K_BA_SIZE_LONG;
            break;
        }

        if (access_type != MMU_DATA_STORE) {
            env->mmu.ssw |= M68K_RW_040;
        }

        cs->exception_index = EXCP_ACCESS;
        cpu_loop_exit(cs);
    }
}
#endif

bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD
        && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) {
        /*
         * Real hardware gets the interrupt vector via an IACK cycle
         * at this point.  Current emulated hardware doesn't rely on
         * this, so we provide/save the vector when the interrupt is
         * first signalled.
         */
        cs->exception_index = env->pending_vector;
        do_interrupt_m68k_hardirq(env);
        return true;
    }
    return false;
}

static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = tt;
    cpu_loop_exit_restore(cs, raddr);
}

static void raise_exception(CPUM68KState *env, int tt)
{
    raise_exception_ra(env, tt, 0);
}

void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
{
    raise_exception(env, tt);
}

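/*
 * 16-bit DIVU/DIVS: the 32-bit destination register receives the
 * remainder in its high word and the quotient in its low word.  If the
 * quotient does not fit in 16 bits, V is set and the destination is
 * left untouched.
 */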
void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den)
{
    uint32_t num = env->dregs[destr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot > 0xffff) {
        env->cc_v = -1;
        /*
         * real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->dregs[destr] = deposit32(quot, 16, 16, rem);
    env->cc_z = (int16_t)quot;
    env->cc_n = (int16_t)quot;
    env->cc_v = 0;
}

void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den)
{
    int32_t num = env->dregs[destr];
    int32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot != (int16_t)quot) {
        env->cc_v = -1;
        /* nothing else is modified */
        /*
         * real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->dregs[destr] = deposit32(quot, 16, 16, rem);
    env->cc_z = (int16_t)quot;
    env->cc_n = (int16_t)quot;
    env->cc_v = 0;
}

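/*
 * 32-bit DIVU.L/DIVS.L: ColdFire ISA A has a single destination, so
 * only one register is written (the quotient when the two register
 * fields match, the remainder otherwise); 680x0 writes the remainder
 * to Dr and the quotient to Dq.
 */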
void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den)
{
    uint32_t num = env->dregs[numr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0;
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
        if (numr == regr) {
            env->dregs[numr] = quot;
        } else {
            env->dregs[regr] = rem;
        }
    } else {
        env->dregs[regr] = rem;
        env->dregs[numr] = quot;
    }
}

void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den)
{
    int32_t num = env->dregs[numr];
    int32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0;
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
        if (numr == regr) {
            env->dregs[numr] = quot;
        } else {
            env->dregs[regr] = rem;
        }
    } else {
        env->dregs[regr] = rem;
        env->dregs[numr] = quot;
    }
}

void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den)
{
    uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
    uint64_t quot;
    uint32_t rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot > 0xffffffffULL) {
        env->cc_v = -1;
        /*
         * real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    /*
     * If Dq and Dr are the same, the quotient is returned.
     * therefore we set Dq last.
     */

    env->dregs[regr] = rem;
    env->dregs[numr] = quot;
}

void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
{
    int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
    int64_t quot;
    int32_t rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot != (int32_t)quot) {
        env->cc_v = -1;
        /*
         * real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    /*
     * If Dq and Dr are the same, the quotient is returned.
     * therefore we set Dq last.
     */

    env->dregs[regr] = rem;
    env->dregs[numr] = quot;
}

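/*
 * CAS2: compare two memory operands against Dc1/Dc2 and, if both
 * match, store Du1/Du2.  The pair that failed the comparison (or the
 * second pair on success) is latched into cc_n/cc_v so the flags can
 * be recomputed as a compare, and the old memory values are always
 * written back to Dc1/Dc2.
 */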
/* We're executing in a serial context -- no need to be atomic. */
void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
    uint32_t Dc1 = extract32(regs, 9, 3);
    uint32_t Dc2 = extract32(regs, 6, 3);
    uint32_t Du1 = extract32(regs, 3, 3);
    uint32_t Du2 = extract32(regs, 0, 3);
    int16_t c1 = env->dregs[Dc1];
    int16_t c2 = env->dregs[Dc2];
    int16_t u1 = env->dregs[Du1];
    int16_t u2 = env->dregs[Du2];
    int16_t l1, l2;
    uintptr_t ra = GETPC();

    l1 = cpu_lduw_data_ra(env, a1, ra);
    l2 = cpu_lduw_data_ra(env, a2, ra);
    if (l1 == c1 && l2 == c2) {
        cpu_stw_data_ra(env, a1, u1, ra);
        cpu_stw_data_ra(env, a2, u2, ra);
    }

    if (c1 != l1) {
        env->cc_n = l1;
        env->cc_v = c1;
    } else {
        env->cc_n = l2;
        env->cc_v = c2;
    }
    env->cc_op = CC_OP_CMPW;
    env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1);
    env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
}

static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
                     bool parallel)
{
    uint32_t Dc1 = extract32(regs, 9, 3);
    uint32_t Dc2 = extract32(regs, 6, 3);
    uint32_t Du1 = extract32(regs, 3, 3);
    uint32_t Du2 = extract32(regs, 0, 3);
    uint32_t c1 = env->dregs[Dc1];
    uint32_t c2 = env->dregs[Dc2];
    uint32_t u1 = env->dregs[Du1];
    uint32_t u2 = env->dregs[Du2];
    uint32_t l1, l2;
    uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64)
    int mmu_idx = cpu_mmu_index(env, 0);
    TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
#endif

    if (parallel) {
        /* We're executing in a parallel context -- must be atomic. */
#ifdef CONFIG_ATOMIC64
        uint64_t c, u, l;
        if ((a1 & 7) == 0 && a2 == a1 + 4) {
            c = deposit64(c2, 32, 32, c1);
            u = deposit64(u2, 32, 32, u1);
            l = cpu_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
            l1 = l >> 32;
            l2 = l;
        } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
            c = deposit64(c1, 32, 32, c2);
            u = deposit64(u1, 32, 32, u2);
            l = cpu_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
            l2 = l >> 32;
            l1 = l;
        } else
#endif
        {
            /* Tell the main loop we need to serialize this insn. */
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    } else {
        /* We're executing in a serial context -- no need to be atomic. */
        l1 = cpu_ldl_data_ra(env, a1, ra);
        l2 = cpu_ldl_data_ra(env, a2, ra);
        if (l1 == c1 && l2 == c2) {
            cpu_stl_data_ra(env, a1, u1, ra);
            cpu_stl_data_ra(env, a2, u2, ra);
        }
    }

    if (c1 != l1) {
        env->cc_n = l1;
        env->cc_v = c1;
    } else {
        env->cc_n = l2;
        env->cc_v = c2;
    }
    env->cc_op = CC_OP_CMPL;
    env->dregs[Dc1] = l1;
    env->dregs[Dc2] = l2;
}

void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
    do_cas2l(env, regs, a1, a2, false);
}

void HELPER(cas2l_parallel)(CPUM68KState *env, uint32_t regs, uint32_t a1,
                            uint32_t a2)
{
    do_cas2l(env, regs, a1, a2, true);
}

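/*
 * Memory bitfield helpers.  bf_prep() turns (addr, bit offset, length)
 * into a byte-aligned window of 1, 2, 4 or 8 bytes that covers the
 * field, and re-expresses the field position as a bit offset from the
 * MSB of that window loaded as a big-endian value, so the operations
 * below reduce to shifts and masks on a uint64_t.
 */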
struct bf_data {
    uint32_t addr;
    uint32_t bofs;
    uint32_t blen;
    uint32_t len;
};

static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
{
    int bofs, blen;

    /* Bound length; map 0 to 32. */
    len = ((len - 1) & 31) + 1;

    /* Note that ofs is signed. */
    addr += ofs / 8;
    bofs = ofs % 8;
    if (bofs < 0) {
        bofs += 8;
        addr -= 1;
    }

    /*
     * Compute the number of bytes required (minus one) to
     * satisfy the bitfield.
     */
    blen = (bofs + len - 1) / 8;

    /*
     * Canonicalize the bit offset for data loaded into a 64-bit big-endian
     * word.  For the cases where BLEN is not a power of 2, adjust ADDR so
     * that we can use the next power of two sized load without crossing a
     * page boundary, unless the field itself crosses the boundary.
     */
    switch (blen) {
    case 0:
        bofs += 56;
        break;
    case 1:
        bofs += 48;
        break;
    case 2:
        if (addr & 1) {
            bofs += 8;
            addr -= 1;
        }
        bofs += 32;
        break;
    case 3:
        bofs += 32;
        break;
    case 4:
        bofs += 8 * (addr & 3);
        addr &= -4;
        break;
    default:
        g_assert_not_reached();
    }

    return (struct bf_data){
        .addr = addr,
        .bofs = bofs,
        .blen = blen,
        .len = len,
    };
}

static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
                        uintptr_t ra)
{
    switch (blen) {
    case 0:
        return cpu_ldub_data_ra(env, addr, ra);
    case 1:
        return cpu_lduw_data_ra(env, addr, ra);
    case 2:
    case 3:
        return cpu_ldl_data_ra(env, addr, ra);
    case 4:
        return cpu_ldq_data_ra(env, addr, ra);
    default:
        g_assert_not_reached();
    }
}

static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
                     uint64_t data, uintptr_t ra)
{
    switch (blen) {
    case 0:
        cpu_stb_data_ra(env, addr, data, ra);
        break;
    case 1:
        cpu_stw_data_ra(env, addr, data, ra);
        break;
    case 2:
    case 3:
        cpu_stl_data_ra(env, addr, data, ra);
        break;
    case 4:
        cpu_stq_data_ra(env, addr, data, ra);
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
                            int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);

    return (int64_t)(data << d.bofs) >> (64 - d.len);
}

uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
                            int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);

    /*
     * Put CC_N at the top of the high word; put the zero-extended value
     * at the bottom of the low word.
     */
    data <<= d.bofs;
    data >>= 64 - d.len;
    data |= data << (64 - d.len);

    return data;
}

uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);

    bf_store(env, d.addr, d.blen, data, ra);

    /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */
    return val << (32 - d.len);
}

uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    bf_store(env, d.addr, d.blen, data ^ mask, ra);

    return ((data & mask) << d.bofs) >> 32;
}

uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    bf_store(env, d.addr, d.blen, data & ~mask, ra);

    return ((data & mask) << d.bofs) >> 32;
}

uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    bf_store(env, d.addr, d.blen, data | mask, ra);

    return ((data & mask) << d.bofs) >> 32;
}

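/*
 * The BFFFO helpers count leading zeros within the field.  The memory
 * variant returns a packed 64-bit value with the masked field (CC_N)
 * in the high half and the bit-offset result in the low half, which
 * the translator then splits.
 */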
uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
{
    return (n ? clz32(n) : len) + ofs;
}

uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
    uint64_t n = (data & mask) << d.bofs;
    uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);

    /*
     * Return FFO in the low word and N in the high word.
     * Note that because of MASK and the shift, the low word
     * is already zero.
     */
    return n | ffo;
}

void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub)
{
    /*
     * From the specs:
     * X: Not affected, C,V,Z: Undefined,
     * N: Set if val < 0; cleared if val > ub, undefined otherwise
     * We implement here values found from a real MC68040:
     * X,V,Z: Not affected
     * N: Set if val < 0; cleared if val >= 0
     * C: if 0 <= ub: set if val < 0 or val > ub, cleared otherwise
     *    if 0 > ub: set if val > ub and val < 0, cleared otherwise
     */
    env->cc_n = val;
    env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0;

    if (val < 0 || val > ub) {
        CPUState *cs = env_cpu(env);

        /* Recover PC and CC_OP for the beginning of the insn. */
        cpu_restore_state(cs, GETPC(), true);

        /* flags have been modified by gen_flush_flags() */
        env->cc_op = CC_OP_FLAGS;
        /* Adjust PC to end of the insn. */
        env->pc += 2;

        cs->exception_index = EXCP_CHK;
        cpu_loop_exit(cs);
    }
}

void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
{
    /*
     * From the specs:
     * X: Not affected, N,V: Undefined,
     * Z: Set if val is equal to lb or ub
     * C: Set if val < lb or val > ub, cleared otherwise
     * We implement here values found from a real MC68040:
     * X,N,V: Not affected
     * Z: Set if val is equal to lb or ub
     * C: if lb <= ub: set if val < lb or val > ub, cleared otherwise
     *    if lb > ub: set if val > ub and val < lb, cleared otherwise
     */
    env->cc_z = val != lb && val != ub;
    env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb;

    if (env->cc_c) {
        CPUState *cs = env_cpu(env);

        /* Recover PC and CC_OP for the beginning of the insn. */
        cpu_restore_state(cs, GETPC(), true);

        /* flags have been modified by gen_flush_flags() */
        env->cc_op = CC_OP_FLAGS;
        /* Adjust PC to end of the insn. */
        env->pc += 4;

        cs->exception_index = EXCP_CHK;
        cpu_loop_exit(cs);
    }
}