unsigned long long x = 0x8877665544332211ull;
void * volatile p = (void *)&x + 1;
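
/*
 * p points one byte into x, so it is not naturally aligned for
 * 2-, 4-, or 8-byte accesses.
 */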

void sigbus(int sig, siginfo_t *info, void *uc)
{
    assert(sig == SIGBUS);
    assert(info->si_signo == SIGBUS);
#ifdef BUS_ADRALN
    assert(info->si_code == BUS_ADRALN);
#endif
    assert(info->si_addr == p);
    exit(EXIT_SUCCESS);
}

int main()
{
    struct sigaction sa = {
        .sa_sigaction = sigbus,
        .sa_flags = SA_SIGINFO
    };
    int allow_fail = 0;
    int tmp;

    tmp = sigaction(SIGBUS, &sa, NULL);
    assert(tmp == 0);
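
    /*
     * SA_SIGINFO selects the three-argument handler form, so sigbus()
     * above receives the siginfo_t whose si_code and si_addr it checks.
     */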

    /*
     * Select an operation that's likely to enforce alignment.
     * On many guests that support unaligned accesses by default,
     * this is often an atomic operation.
     */
#if defined(__aarch64__)
    asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__alpha__)
    asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__arm__)
    asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__powerpc__)
    asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
#elif defined(__riscv_atomic)
    asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
#else
    /* No insn known to fault unaligned -- try for a straight load. */
    allow_fail = 1;
    tmp = *(volatile int *)p;
#endif

    assert(allow_fail);
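
    /*
     * Byte-level view of the final check: on a little-endian host x is
     * stored as bytes 11 22 33 44 55 66 77 88, so a 4-byte load from
     * &x + 1 reads 22 33 44 55 and yields 0x55443322; on a big-endian
     * host the bytes are 88 77 66 55 44 33 22 11, the load reads
     * 77 66 55 44, giving 0x77665544.
     */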

    /*
     * We didn't see a signal.
     * We might as well validate the unaligned load worked.
     */
    if (BYTE_ORDER == LITTLE_ENDIAN) {
        assert(tmp == 0x55443322);
    } else {
        assert(tmp == 0x77665544);
    }
    return EXIT_SUCCESS;
}
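
/*
 * One way to exercise the test (an assumed workflow, not stated in the code):
 * build the full file with a native or cross C compiler and run the binary
 * either directly or under a user-mode emulator.  It exits 0 whether the
 * misaligned access raises SIGBUS (caught by sigbus()) or completes with the
 * expected value; any other outcome trips an assert.
 */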