[AROS.git] arch/all-mingw32/kernel/leaveinterrupt_x86_64.s

# Copyright © 2010-2011, The AROS Development Team. All rights reserved.
# $Id$

# Desc: Exit from an emulated interrupt with interrupts re-enabled, x86-64 version
# Lang: English

# Theory of operation:
# x86-64 has a red zone of 128 bytes below %rsp. Unlike on i386, this makes it
# impossible to use push/pop instructions here, because doing so would destroy
# red zone data.
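# For example, a single "pushq %rbx" at this point would store %rbx at -8(%rsp),
# right inside the interrupted code's red zone.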
# Instead, we skip the red zone and use -128(%rsp) as our intermediate storage.
# However, there's an important problem with this. x86 is not ARM, and we can't
# do something like "addq $128, %rsp; jmpq *-128(%rsp)" atomically. This means
# we can be preempted right before the final jump. This is dangerous, because
# the next time we get back here our own address will be in 0(%rax),
# overwriting -128(%rsp) with it and causing an infinite loop.
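# Concretely: if preemption hits between the movl and the jmpq below, the saved
# rip is the address of the jmpq itself; on the next pass through this code,
# 0(%rax) holds that address, it gets copied to -128(%rsp), and the jmpq would
# then keep jumping to itself forever.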
# To work around this issue, the interrupt thread checks the rip value, and if
# rip points into this code, interrupts are considered disabled.
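# A sketch of that test, as it might look on the interrupt thread side
# (illustrative only; the real check lives in the hosted kernel's C code, and
# "ctx" here stands for the Windows CONTEXT of the suspended thread):
#
#     if (ctx->Rip >= (ULONG_PTR)core_LeaveInterrupt &&
#         ctx->Rip <  (ULONG_PTR)core_LeaveInt_End)
#     {
#         /* rip is inside this routine: treat interrupts as disabled */
#     }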
# We still have to use the stack for temporary storage, because we first need
# to restore rax and only then jump to the return address.

.globl core_LeaveInterrupt
.globl core_LeaveInt_End

core_LeaveInterrupt:
	movq	%rbx, -128(%rsp)	# Save rbx
	movq	0(%rax), %rbx		# Get real return address into rbx
	xchg	%rbx, -128(%rsp)	# Remember return address and restore rbx
	movq	8(%rax), %rax		# Restore real rax contents
	movl	$1, Ints_Enabled	# Now enable interrupts
	jmpq	*-128(%rsp)		# And jump to the needed address
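# core_LeaveInt_End marks the end of the rip range that the interrupt thread
# treats as interrupts-disabled (see above).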
core_LeaveInt_End: