1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * vlock.S - simple voting lock implementation for ARM
5 * Created by: Dave Martin, 2012-08-16
6 * Copyright: (C) 2012-2013 Linaro Limited
8 * This algorithm is described in more detail in
9 * Documentation/arch/arm/vlocks.rst.
12 #include <linux/linkage.h>
17 /* Select different code if voting flags can fit in a single word. */
18 #if VLOCK_VOTING_SIZE > 4
26 @ voting lock for first-man coordination
28 .macro voting_begin rbase:req, rcpu:req, rscratch:req
30 strb \rscratch, [\rbase, \rcpu]
34 .macro voting_end rbase:req, rcpu:req, rscratch:req
37 strb \rscratch, [\rbase, \rcpu]
43 * The vlock structure must reside in Strongly-Ordered or Device memory.
44 * This implementation deliberately eliminates most of the barriers which
45 * would be required for other memory types, and assumes that independent
46 * writes to neighbouring locations within a cacheline do not interfere
50 @ r0: lock structure base
51 @ r1: CPU ID (0-based index within cluster)
53 add r1, r1, #VLOCK_VOTING_OFFSET
55 voting_begin r0, r1, r2
57 ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
58 cmp r2, #VLOCK_OWNER_NONE
59 bne trylock_fail @ fail if so
61 @ Control dependency implies strb not observable before previous ldrb.
63 strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
65 voting_end r0, r1, r2 @ implies DMB
67 @ Wait for the current round of voting to finish:
69 MANY( mov r3, #VLOCK_VOTING_OFFSET )
71 MANY( ldr r2, [r0, r3] )
72 FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
76 MANY( add r3, r3, #4 )
77 MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
83 ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
84 eor r0, r1, r2 @ zero if I won, else nonzero
89 mov r0, #1 @ nonzero indicates that I lost
91 ENDPROC(vlock_trylock)
93 @ r0: lock structure base
96 mov r1, #VLOCK_OWNER_NONE
97 strb r1, [r0, #VLOCK_OWNER_OFFSET]
101 ENDPROC(vlock_unlock)