/* libgcc/config/pa/linux-atomic.c  */

/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2024 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#define EFAULT 14
#define EBUSY  16
#define ENOSYS 251

#define _ASM_EFAULT "-14"

typedef unsigned char u8;
typedef short unsigned int u16;
typedef unsigned int u32;
#ifdef __LP64__
typedef long unsigned int u64;
#else
typedef long long unsigned int u64;
#endif

/* PA-RISC 2.0 supports out-of-order execution for loads and stores.
   Thus, we need to synchronize memory accesses.  For more info, see:
   "Advanced Performance Features of the 64-bit PA-8000" by Doug Hunt.

   We implement byte, short, int and 64-bit versions of each atomic
   operation using the kernel helpers defined below.  */

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)
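
/* The kernel helpers below enter the kernel through the light-weight
   syscall (LWS) gateway, "ble 0xb0(%sr2, %r0)": the LWS function number
   is loaded into %r20 in the branch's delay slot, the arguments are
   passed in %r26, %r25, %r24 (and %r23 for the variable-size helper),
   the result comes back in %r28 and an error code in %r21.  When the
   kernel reports -EFAULT, the "cmpiclr,<>"/"iitlbp" pair lets the
   privileged iitlbp instruction execute in user mode, so the bad
   address shows up as a trap instead of being silently ignored (this
   reading of the sequence is inferred from the code itself).  */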

/* Kernel helper for compare-and-exchange of a 32-bit value.  */
static inline long
__kernel_cmpxchg (volatile void *mem, int oldval, int newval)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile ("ble 0xb0(%%sr2, %%r0)\n\t"
		"ldi %2, %%r20\n\t"
		"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
		"iitlbp %%r0,(%%sr0, %%r0)\n\t"
		: "=r" (lws_ret), "=r" (lws_errno)
		: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
		: "r1", "r20", "r22", "r23", "r29", "r31", "memory");

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    return -EBUSY;

  return lws_errno;
}
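
/* Kernel helper for compare-and-exchange of a 1-, 2-, 4- or 8-byte
   value (LWS function 2).  VAL_SIZE selects the operand width and
   matches the INDEX arguments used by the macros below: 0 for u8,
   1 for u16 and 3 for u64 (apparently log2 of the size in bytes;
   4-byte values are handled by __kernel_cmpxchg above instead).  */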
static inline long
__kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
		   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile ("ble 0xb0(%%sr2, %%r0)\n\t"
		"ldi %6, %%r20\n\t"
		"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
		"iitlbp %%r0,(%%sr0, %%r0)\n\t"
		: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
		  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
		: "i" (2)
		: "r1", "r20", "r22", "r29", "r31", "fr4", "memory");

  /* If the kernel LWS call is successful, lws_ret contains 0.  */
  if (__builtin_expect (lws_ret == 0, 1))
    return 0;

  /* If the kernel LWS call fails with no error, return -EBUSY.  */
  if (__builtin_expect (!lws_errno, 0))
    return -EBUSY;

  return lws_errno;
}
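
/* The __sync_* implementations below are given hidden visibility so
   that references to them bind within the module that links this file
   rather than through the dynamic symbol table.  */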
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

/* Load value with an atomic processor load if possible.  */
#define ATOMIC_LOAD(TYPE, WIDTH) \
  static inline TYPE \
  atomic_load_##WIDTH (volatile void *ptr) \
  { \
    return *(volatile TYPE *)ptr; \
  }

#if defined(__LP64__) || defined(__SOFTFP__)
ATOMIC_LOAD (u64, 8)
#else
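/* On 32-bit hard-float targets a 64-bit value cannot be loaded with a
   single integer instruction, so the load is done atomically with a
   floating-point doubleword load and the bits are then copied to
   integer registers through the stack (rationale inferred from the
   code below).  */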
static inline u64
atomic_load_8 (volatile void *ptr)
{
  u64 result;
  double tmp;

  asm volatile ("{fldds|fldd} 0(%2),%1\n\t"
		"{fstds|fstd} %1,-16(%%sp)\n\t"
		"{ldws|ldw} -16(%%sp),%0\n\t"
		"{ldws|ldw} -12(%%sp),%R0"
		: "=r" (result), "=f" (tmp) : "r" (ptr): "memory");
  return result;
}
#endif

ATOMIC_LOAD (u32, 4)
ATOMIC_LOAD (u16, 2)
ATOMIC_LOAD (u8, 1)
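
/* The functions below are the out-of-line implementations of the GCC
   __sync builtins for this target.  As a purely illustrative example
   (the object name is hypothetical), a call such as

     __sync_fetch_and_add (&counter, 1)

   on an unsigned short counter resolves on hppa-linux to a call to
   __sync_fetch_and_add_2, defined by FETCH_AND_OP_2 below.  */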

#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    long failure; \
    \
    do { \
      tmp = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
    \
    return tmp; \
  }

FETCH_AND_OP_2 (add, , +, u64, 8, 3)
FETCH_AND_OP_2 (sub, , -, u64, 8, 3)
FETCH_AND_OP_2 (or, , |, u64, 8, 3)
FETCH_AND_OP_2 (and, , &, u64, 8, 3)
FETCH_AND_OP_2 (xor, , ^, u64, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, u64, 8, 3)

FETCH_AND_OP_2 (add, , +, u16, 2, 1)
FETCH_AND_OP_2 (sub, , -, u16, 2, 1)
FETCH_AND_OP_2 (or, , |, u16, 2, 1)
FETCH_AND_OP_2 (and, , &, u16, 2, 1)
FETCH_AND_OP_2 (xor, , ^, u16, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, u16, 2, 1)

FETCH_AND_OP_2 (add, , +, u8, 1, 0)
FETCH_AND_OP_2 (sub, , -, u8, 1, 0)
FETCH_AND_OP_2 (or, , |, u8, 1, 0)
FETCH_AND_OP_2 (and, , &, u8, 1, 0)
FETCH_AND_OP_2 (xor, , ^, u8, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, u8, 1, 0)
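
/* As an illustration, FETCH_AND_OP_2 (add, , +, u16, 2, 1) above
   expands to roughly:

     u16 HIDDEN
     __sync_fetch_and_add_2 (volatile void *ptr, u16 val)
     {
       u16 tmp, newval;
       long failure;

       do {
         tmp = atomic_load_2 ((volatile u16 *)ptr);
         newval = (tmp + val);
         failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, 1);
       } while (failure != 0);

       return tmp;
     }

   i.e. a load/modify/compare-and-exchange loop that retries until the
   kernel helper succeeds and returns the value observed before the
   update.  */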

#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_##OP##_and_fetch_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    long failure; \
    \
    do { \
      tmp = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
    \
    return PFX_OP (tmp INF_OP val); \
  }

OP_AND_FETCH_2 (add, , +, u64, 8, 3)
OP_AND_FETCH_2 (sub, , -, u64, 8, 3)
OP_AND_FETCH_2 (or, , |, u64, 8, 3)
OP_AND_FETCH_2 (and, , &, u64, 8, 3)
OP_AND_FETCH_2 (xor, , ^, u64, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, u64, 8, 3)

OP_AND_FETCH_2 (add, , +, u16, 2, 1)
OP_AND_FETCH_2 (sub, , -, u16, 2, 1)
OP_AND_FETCH_2 (or, , |, u16, 2, 1)
OP_AND_FETCH_2 (and, , &, u16, 2, 1)
OP_AND_FETCH_2 (xor, , ^, u16, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, u16, 2, 1)

OP_AND_FETCH_2 (add, , +, u8, 1, 0)
OP_AND_FETCH_2 (sub, , -, u8, 1, 0)
OP_AND_FETCH_2 (or, , |, u8, 1, 0)
OP_AND_FETCH_2 (and, , &, u8, 1, 0)
OP_AND_FETCH_2 (xor, , ^, u8, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, u8, 1, 0)
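
/* The 4-byte (word) variants below use the plain word-sized
   __kernel_cmpxchg helper instead of __kernel_cmpxchg2.  */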

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
  unsigned int HIDDEN \
  __sync_fetch_and_##OP##_4 (volatile void *ptr, unsigned int val) \
  { \
    unsigned int tmp; \
    long failure; \
    \
    do { \
      tmp = atomic_load_4 ((volatile unsigned int *)ptr); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
    \
    return tmp; \
  }

FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
  unsigned int HIDDEN \
  __sync_##OP##_and_fetch_4 (volatile void *ptr, unsigned int val) \
  { \
    unsigned int tmp; \
    long failure; \
    \
    do { \
      tmp = atomic_load_4 ((volatile unsigned int *)ptr); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
    \
    return PFX_OP (tmp INF_OP val); \
  }

OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)
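
/* __sync_val_compare_and_swap must return the value actually observed
   in memory, so the loops below return early as soon as the loaded
   value differs from OLDVAL and otherwise retry until the kernel
   helper succeeds.  */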

#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (volatile void *ptr, TYPE oldval, \
				       TYPE newval) \
  { \
    TYPE actual_oldval; \
    long fail; \
    \
    while (1) \
      { \
        actual_oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
        \
        if (__builtin_expect (oldval != actual_oldval, 0)) \
          return actual_oldval; \
        \
        fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
        \
        if (__builtin_expect (!fail, 1)) \
          return actual_oldval; \
      } \
  } \
  \
  _Bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (volatile void *ptr, \
					TYPE oldval, TYPE newval) \
  { \
    long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
    return (failure == 0); \
  }

COMPARE_AND_SWAP_2 (u64, 8, 3)
COMPARE_AND_SWAP_2 (u16, 2, 1)
COMPARE_AND_SWAP_2 (u8, 1, 0)

unsigned int HIDDEN
__sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
			       unsigned int newval)
{
  long fail;
  unsigned int actual_oldval;

  while (1)
    {
      actual_oldval = atomic_load_4 ((volatile unsigned int *)ptr);

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}

_Bool HIDDEN
__sync_bool_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
				unsigned int newval)
{
  long failure = __kernel_cmpxchg (ptr, oldval, newval);
  return (failure == 0);
}
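
/* __sync_lock_test_and_set is an atomic exchange: the new value is
   stored unconditionally and the previous contents are returned.  Here
   it is built from a compare-and-exchange loop that retries until the
   kernel helper accepts the update.  */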

#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_lock_test_and_set_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE oldval; \
    long failure; \
    \
    do { \
      oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
    \
    return oldval; \
  }

SYNC_LOCK_TEST_AND_SET_2 (u64, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (u16, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (u8, 1, 0)

u32 HIDDEN
__sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = atomic_load_4 ((volatile unsigned int *)ptr);
    failure = __kernel_cmpxchg (ptr, oldval, val);
  } while (failure != 0);

  return oldval;
}

#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH, INDEX) \
  void HIDDEN \
  __sync_lock_release_##WIDTH (volatile void *ptr) \
  { \
    TYPE oldval, val = 0; \
    long failure; \
    \
    do { \
      oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
  }

SYNC_LOCK_RELEASE_1 (u64, 8, 3)
SYNC_LOCK_RELEASE_1 (u16, 2, 1)
SYNC_LOCK_RELEASE_1 (u8, 1, 0)

void HIDDEN
__sync_lock_release_4 (volatile void *ptr)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = atomic_load_4 ((volatile unsigned int *)ptr);
    failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
}
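
/* __sync_lock_load_8 provides an 8-byte atomic load for 32-bit
   userspace by performing a compare-and-exchange that replaces the
   current value with itself and returning the observed value.
   (Presumably it exists for callers that need a 64-bit atomic load
   where a plain load is not sufficient; that use is not visible from
   this file.)  */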

#ifndef __LP64__
#define SYNC_LOCK_LOAD_2(TYPE, WIDTH, INDEX) \
  TYPE __sync_lock_load_##WIDTH (volatile void *) HIDDEN; \
  TYPE \
  __sync_lock_load_##WIDTH (volatile void *ptr) \
  { \
    TYPE oldval; \
    long failure; \
    \
    do { \
      oldval = atomic_load_##WIDTH ((volatile TYPE *)ptr); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &oldval, INDEX); \
    } while (failure != 0); \
    \
    return oldval; \
  }

SYNC_LOCK_LOAD_2 (u64, 8, 3)
#endif