/*
    Copyright © 2017, The AROS Development Team. All rights reserved.
*/

#include <aros/atomic.h>
#include <aros/types/spinlock_s.h>
#include <aros/kernel.h>
#include <aros/libcall.h>

#include <kernel_base.h>
#include <kernel_debug.h>

#include <proto/kernel.h>

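/*
 * KrnSpinUnLock() - release a spinlock previously acquired in either WRITE (exclusive)
 * or READ (shared) mode. The WRITE case is tried first; if the lock was not write-held,
 * the reader count is decremented instead.
 */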
AROS_LH1(void, KrnSpinUnLock,
    AROS_LHA(spinlock_t *, lock, A0),
    struct KernelBase *, KernelBase, 53, Kernel)
{
    AROS_LIBFUNC_INIT

    D(bug("[Kernel] %s(0x%p)\n", __func__, lock));

    /*
     * Use cmpxchg - expect SPINLOCKF_WRITE and replace it with 0 (unlocking the spinlock). If that succeeded,
     * the lock was in WRITE mode and is now free. If it was not, continue with unlocking a READ mode spinlock.
     */
    if (!compare_and_exchange_long((ULONG *)&lock->lock, SPINLOCKF_WRITE, 0, NULL))
    {
        /*
         * Unlocking a READ mode spinlock means we need to put it into the UPDATING state, decrement the
         * reader counter and then clear the UPDATING state again.
         */
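        /*
         * Note: block[3] is assumed to be the most significant byte of the 32-bit lock word (little-endian
         * layout), i.e. the byte holding SPINLOCKF_UPDATING - hence the ">> 24" shift below.
         */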
        while (!compare_and_exchange_byte((UBYTE *)&lock->block[3], 0, SPINLOCKF_UPDATING >> 24, NULL))
        {
            // Tell CPU we are spinning
            asm volatile("pause");
        }

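        // With the UPDATING byte claimed, no other CPU can modify the reader count concurrently.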
        // Just in case someone tries to unlock already unlocked stuff
        if (lock->slock.readcount != 0)
        {
            lock->slock.readcount--;
        }

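        /*
         * Leave the UPDATING state again: without atomic operations a plain store to the updating byte
         * is enough, otherwise clear the flag atomically in the full lock word.
         */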
#if defined(AROS_NO_ATOMIC_OPERATIONS)
        lock->slock.updating = 0;
#else
        __AROS_ATOMIC_AND_L(lock->lock, ~SPINLOCKF_UPDATING);
#endif
    }

    D(bug("[Kernel] %s: lock = %08x\n", __func__, lock->lock));

    AROS_LIBFUNC_EXIT
}
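/*
 * Usage sketch (illustrative only; the exact KrnSpinLock() arguments are an assumption about the
 * public kernel.resource API and are not defined in this file):
 *
 *     KrnSpinLock(&mylock, NULL, SPINLOCK_MODE_WRITE);
 *     // ... touch the protected data ...
 *     KrnSpinUnLock(&mylock);
 *
 * Readers would take the lock with SPINLOCK_MODE_READ and release it the same way.
 */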