/*
 * QEMU TCG multiarch test: tests/tcg/multiarch/sigbus.c
 * Provoke an unaligned access and verify SIGBUS delivery and siginfo contents.
 */
#define _GNU_SOURCE 1

#include <assert.h>
#include <stdlib.h>
#include <signal.h>
#include <endian.h>

/*
 * Test datum, plus a deliberately misaligned pointer into it: p points
 * one byte past the start of x, so loading a wider type through p is an
 * unaligned access.  volatile keeps the compiler from constant-folding
 * the load or reasoning about the (known-bad) alignment.
 */
unsigned long long x = 0x8877665544332211ull;
void * volatile p = (void *)&x + 1;
12 void sigbus(int sig, siginfo_t *info, void *uc)
14 assert(sig == SIGBUS);
15 assert(info->si_signo == SIGBUS);
16 #ifdef BUS_ADRALN
17 assert(info->si_code == BUS_ADRALN);
18 #endif
19 assert(info->si_addr == p);
20 exit(EXIT_SUCCESS);
23 int main()
25 struct sigaction sa = {
26 .sa_sigaction = sigbus,
27 .sa_flags = SA_SIGINFO
29 int allow_fail = 0;
30 int tmp;
32 tmp = sigaction(SIGBUS, &sa, NULL);
33 assert(tmp == 0);
36 * Select an operation that's likely to enforce alignment.
37 * On many guests that support unaligned accesses by default,
38 * this is often an atomic operation.
40 #if defined(__aarch64__)
41 asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
42 #elif defined(__alpha__)
43 asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
44 #elif defined(__arm__)
45 asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
46 #elif defined(__powerpc__)
47 asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
48 #elif defined(__riscv_atomic)
49 asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
50 #else
51 /* No insn known to fault unaligned -- try for a straight load. */
52 allow_fail = 1;
53 tmp = *(volatile int *)p;
54 #endif
56 assert(allow_fail);
59 * We didn't see a signal.
60 * We might as well validate the unaligned load worked.
62 if (BYTE_ORDER == LITTLE_ENDIAN) {
63 assert(tmp == 0x55443322);
64 } else {
65 assert(tmp == 0x77665544);
67 return EXIT_SUCCESS;