/* Linux-specific atomic operations for NDS32 Linux.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */

/* This function is copied from the NDS32 Linux kernel.  */
static inline int
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  int temp1, temp2, temp3, offset;

  asm volatile ("msync\tall\n"
                "movi\t%0, #0\n"
                "1:\n"
                "\tllw\t%1, [%4+%0]\n"
                "\tsub\t%3, %1, %6\n"
                "\tcmovz\t%2, %5, %3\n"
                "\tcmovn\t%2, %1, %3\n"
                "\tscw\t%2, [%4+%0]\n"
                "\tbeqz\t%2, 1b\n"
                : "=&r" (offset), "=&r" (temp3), "=&r" (temp2), "=&r" (temp1)
                : "r" (mem), "r" (newval), "r" (oldval) : "memory");

  return temp1;
}
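
/* For illustration (a sketch, not part of the original source): the
   llw/scw pair above forms an LL/SC retry loop.  scw leaves 0 in %2
   when the store-conditional fails, and beqz then restarts at the llw.
   The helper returns 0 when *MEM matched OLDVAL (and NEWVAL was
   stored) and nonzero otherwise, which is why every caller below
   retries in a loop of the form

     int tmp, failure;
     do {
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
       failure = __kernel_cmpxchg (tmp, <new value>, ptr);
     } while (failure != 0);
*/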

#define HIDDEN __attribute__ ((visibility ("hidden")))

#ifdef __NDS32_EL__
#define INVERT_MASK_1 0
#define INVERT_MASK_2 0
#else
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16
#endif

#define MASK_1 0xffu
#define MASK_2 0xffffu
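
/* Worked example (illustration only): on a little-endian target
   (__NDS32_EL__), a short at an address with (ptr & 3) == 2 gives

     shift = ((2 << 3) ^ INVERT_MASK_2) = 16 ^ 0 = 16
     mask  = MASK_2 << shift            = 0xffff0000

   so the halfword occupies bits 16..31 of its containing word.  On a
   big-endian target INVERT_MASK_2 is 16, so the same address yields
   shift = 0, matching big-endian byte placement.  */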

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)                           \
  int HIDDEN                                                            \
  __sync_fetch_and_##OP##_4 (int *ptr, int val)                         \
  {                                                                     \
    int failure, tmp;                                                   \
                                                                        \
    do {                                                                \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                    \
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);   \
    } while (failure != 0);                                             \
                                                                        \
    return tmp;                                                         \
  }

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)
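
/* For illustration, FETCH_AND_OP_WORD (add, , +) expands to roughly:

     int HIDDEN
     __sync_fetch_and_add_4 (int *ptr, int val)
     {
       int failure, tmp;
       do {
         tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
         failure = __kernel_cmpxchg (tmp, (tmp + val), ptr);
       } while (failure != 0);
       return tmp;
     }

   The old word value is returned, and the update is retried until the
   compare-and-swap succeeds.  */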

#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  */

#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)        \
  TYPE HIDDEN                                                           \
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)                     \
  {                                                                     \
    int *wordptr = (int *) ((unsigned long) ptr & ~3);                  \
    unsigned int mask, shift, oldval, newval;                           \
    int failure;                                                        \
                                                                        \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;     \
    mask = MASK_##WIDTH << shift;                                       \
                                                                        \
    do {                                                                \
      oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);             \
      newval = ((PFX_OP (((oldval & mask) >> shift)                     \
                         INF_OP (unsigned int) val)) << shift) & mask;  \
      newval |= oldval & ~mask;                                         \
      failure = __kernel_cmpxchg (oldval, newval, wordptr);             \
    } while (failure != 0);                                             \
                                                                        \
    return (RETURN & mask) >> shift;                                    \
  }

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
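
/* A worked subword example (illustrative values, little-endian target):
   SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval) defines
   __sync_fetch_and_or_1.  For a byte at (ptr & 3) == 1 holding 0x0f,
   __sync_fetch_and_or_1 (ptr, 0x30) computes shift = 8, mask = 0xff00,
   then

     newval  = ((((oldval & 0xff00) >> 8) | 0x30) << 8) & 0xff00;
     newval |= oldval & ~0xff00;

   and returns ((oldval & 0xff00) >> 8) == 0x0f, the old byte.  */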

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)                           \
  int HIDDEN                                                            \
  __sync_##OP##_and_fetch_4 (int *ptr, int val)                         \
  {                                                                     \
    int tmp, failure;                                                   \
                                                                        \
    do {                                                                \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);                    \
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);   \
    } while (failure != 0);                                             \
                                                                        \
    return PFX_OP (tmp INF_OP val);                                     \
  }

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)
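
/* The only difference from FETCH_AND_OP_WORD is the return value: for
   example, with *ptr == 5, __sync_fetch_and_add_4 (ptr, 1) returns 5
   while __sync_add_and_fetch_4 (ptr, 1) returns 6; both leave *ptr
   containing 6.  */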

SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);

      if (oldval != actual_oldval)
        return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (!fail)
        return oldval;
    }
}
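
/* Illustrative use (the variable lock is hypothetical): a caller can
   build a spinlock acquire on top of this primitive with

     while (__sync_val_compare_and_swap_4 (&lock, 0, 1) != 0)
       ;

   On mismatch the function returns the value actually observed, so the
   caller can also inspect what the word really contained.  */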

#define SUBWORD_VAL_CAS(TYPE, WIDTH)                                    \
  TYPE HIDDEN                                                           \
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,          \
                                       TYPE newval)                     \
  {                                                                     \
    int *wordptr = (int *)((unsigned long) ptr & ~3), fail;             \
    unsigned int mask, shift, actual_oldval, actual_newval;             \
                                                                        \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;     \
    mask = MASK_##WIDTH << shift;                                       \
                                                                        \
    while (1)                                                           \
      {                                                                 \
        actual_oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);    \
                                                                        \
        if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \
          return (actual_oldval & mask) >> shift;                       \
                                                                        \
        actual_newval = (actual_oldval & ~mask)                         \
                        | (((unsigned int) newval << shift) & mask);    \
                                                                        \
        fail = __kernel_cmpxchg (actual_oldval, actual_newval,          \
                                 wordptr);                              \
                                                                        \
        if (!fail)                                                      \
          return oldval;                                                \
      }                                                                 \
  }

SUBWORD_VAL_CAS (unsigned short, 2)
SUBWORD_VAL_CAS (unsigned char, 1)

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}

#define SUBWORD_BOOL_CAS(TYPE, WIDTH)                                   \
  bool HIDDEN                                                           \
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,         \
                                        TYPE newval)                    \
  {                                                                     \
    TYPE actual_oldval                                                  \
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval);      \
    return (oldval == actual_oldval);                                   \
  }

SUBWORD_BOOL_CAS (unsigned short, 2)
SUBWORD_BOOL_CAS (unsigned char, 1)

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);

  return oldval;
}

#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)                               \
  TYPE HIDDEN                                                           \
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)                \
  {                                                                     \
    int failure;                                                        \
    unsigned int oldval, newval, shift, mask;                           \
    int *wordptr = (int *) ((unsigned long) ptr & ~3);                  \
                                                                        \
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;     \
    mask = MASK_##WIDTH << shift;                                       \
                                                                        \
    do {                                                                \
      oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);             \
      newval = (oldval & ~mask)                                         \
               | (((unsigned int) val << shift) & mask);                \
      failure = __kernel_cmpxchg (oldval, newval, wordptr);             \
    } while (failure != 0);                                             \
                                                                        \
    return (oldval & mask) >> shift;                                    \
  }

SUBWORD_TEST_AND_SET (unsigned short, 2)
SUBWORD_TEST_AND_SET (unsigned char, 1)
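
/* Note that __sync_lock_test_and_set is an unconditional atomic
   exchange, not a pure test-and-set: it stores VAL and returns the
   previous contents.  For illustration (flag is a hypothetical
   unsigned char), a byte spinlock acquire looks like

     while (__sync_lock_test_and_set_1 (&flag, 1) != 0)
       ;
*/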

#define SYNC_LOCK_RELEASE(TYPE, WIDTH)                                  \
  void HIDDEN                                                           \
  __sync_lock_release_##WIDTH (TYPE *ptr)                               \
  {                                                                     \
    /* All writes before this point must be seen before we release     \
       the lock itself.  */                                             \
    __builtin_nds32_msync_all ();                                       \
    *ptr = 0;                                                           \
  }

SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)
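
/* Sketch of the intended acquire/release pairing (flag is the same
   hypothetical byte as above):

     while (__sync_lock_test_and_set_1 (&flag, 1) != 0)
       ;
     ... critical section ...
     __sync_lock_release_1 (&flag);

   The "msync all" barrier emitted by __builtin_nds32_msync_all orders
   every preceding write before the plain store of 0 that releases the
   lock.  */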