/* Linux-specific atomic operations for NDS32 Linux.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#include <stdbool.h>	/* For the bool return type of the
			   __sync_bool_* routines below.  */

/* We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */
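/* When GCC cannot expand one of these __sync builtins inline, it emits
   a call to the corresponding routine defined here; for example, on an
   unsigned short counter, __sync_fetch_and_add (&counter, 1) becomes a
   call to __sync_fetch_and_add_2 below.  */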
/* This function is copied from the NDS32 Linux kernel.  It atomically
   compares *MEM with OLDVAL and, if they are equal, stores NEWVAL;
   the return value is zero when the swap succeeded and nonzero
   otherwise.  */
static inline int
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  int temp1, temp2, temp3, offset;

  asm volatile ("msync\tall\n"
		"movi\t%0, #0\n"
		"1:\n"
		"\tllw\t%1, [%4+%0]\n"
		"\tsub\t%3, %1, %6\n"
		"\tcmovz\t%2, %5, %3\n"
		"\tcmovn\t%2, %1, %3\n"
		"\tscw\t%2, [%4+%0]\n"
		"\tbeqz\t%2, 1b\n"
		: "=&r" (offset), "=&r" (temp3), "=&r" (temp2), "=&r" (temp1)
		: "r" (mem), "r" (newval), "r" (oldval) : "memory");

  return temp1;
}
#define HIDDEN __attribute__ ((visibility ("hidden")))

#ifdef __NDS32_EL__
#define INVERT_MASK_1 0
#define INVERT_MASK_2 0
#else
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16
#endif

#define MASK_1 0xffu
#define MASK_2 0xffffu
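/* INVERT_MASK_<WIDTH> is XORed into the shift derived from the low
   address bits: on little-endian a subword's byte offset maps directly
   to its bit offset within the containing word, while on big-endian
   byte 0 holds the most significant bits, so the shift must be
   mirrored.  */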
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int failure, tmp;							\
									\
    do {								\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
FETCH_AND_OP_WORD (add, , +)
FETCH_AND_OP_WORD (sub, , -)
FETCH_AND_OP_WORD (or, , |)
FETCH_AND_OP_WORD (and, , &)
FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)
#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  */
#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)	\
  TYPE HIDDEN								\
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)			\
  {									\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
    unsigned int mask, shift, oldval, newval;				\
    int failure;							\
									\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);		\
      newval = ((PFX_OP (((oldval & mask) >> shift)			\
			 INF_OP (unsigned int) val)) << shift) & mask;	\
      newval |= oldval & ~mask;						\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (RETURN & mask) >> shift;					\
  }
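/* A worked example: on little-endian (INVERT_MASK_2 == 0), a short at
   byte offset 2 of its containing word gets shift = (2 << 3) ^ 0 = 16
   and mask = 0xffff << 16 = 0xffff0000, i.e. the upper halfword, and
   the loop above rewrites only those bits.  On big-endian the XOR with
   INVERT_MASK_2 == 16 gives shift = 0 for the same address, matching
   that byte order.  */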
SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int tmp, failure;							\
									\
    do {								\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
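/* Unlike FETCH_AND_OP_WORD, which returns the value the word held
   before the operation, OP_AND_FETCH_WORD recomputes and returns the
   updated value, so __sync_add_and_fetch_4 (&x, 1) yields x + 1.  */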
OP_AND_FETCH_WORD (add, , +)
OP_AND_FETCH_WORD (sub, , -)
OP_AND_FETCH_WORD (or, , |)
OP_AND_FETCH_WORD (and, , &)
OP_AND_FETCH_WORD (xor, , ^)
OP_AND_FETCH_WORD (nand, ~, &)
SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)

SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)
int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);

      if (oldval != actual_oldval)
	return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (!fail)
	return oldval;
    }
}
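/* In this routine and the subword variant below, the preliminary load
   lets a mismatch return immediately without attempting the swap; the
   loop repeats only when the word changed between the load and the
   __kernel_cmpxchg, in which case the next iteration either succeeds
   or reports the value it saw.  */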
#define SUBWORD_VAL_CAS(TYPE, WIDTH)					\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
				       TYPE newval)			\
  {									\
    int *wordptr = (int *)((unsigned long) ptr & ~3), fail;		\
    unsigned int mask, shift, actual_oldval, actual_newval;		\
									\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    while (1)								\
      {									\
	actual_oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);	\
									\
	if (((actual_oldval & mask) >> shift) != (unsigned int) oldval)	\
	  return (actual_oldval & mask) >> shift;			\
									\
	actual_newval = (actual_oldval & ~mask)				\
			| (((unsigned int) newval << shift) & mask);	\
									\
	fail = __kernel_cmpxchg (actual_oldval, actual_newval,		\
				 wordptr);				\
									\
	if (!fail)							\
	  return oldval;						\
      }									\
  }

SUBWORD_VAL_CAS (unsigned short, 2)
SUBWORD_VAL_CAS (unsigned char, 1)
bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}
#define SUBWORD_BOOL_CAS(TYPE, WIDTH)					\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
					TYPE newval)			\
  {									\
    TYPE actual_oldval							\
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval);	\
    return (oldval == actual_oldval);					\
  }

SUBWORD_BOOL_CAS (unsigned short, 2)
SUBWORD_BOOL_CAS (unsigned char, 1)
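/* These give the usual GCC semantics: a call such as
   __sync_bool_compare_and_swap (&flag, 0, 1) returns true only for a
   caller whose swap actually took effect.  */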
int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);

  return oldval;
}
#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)				\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)		\
  {									\
    int failure;							\
    unsigned int oldval, newval, shift, mask;				\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
									\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
									\
    do {								\
      oldval = __atomic_load_n (wordptr, __ATOMIC_SEQ_CST);		\
      newval = (oldval & ~mask)						\
	       | (((unsigned int) val << shift) & mask);		\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
									\
    return (oldval & mask) >> shift;					\
  }

SUBWORD_TEST_AND_SET (unsigned short, 2)
SUBWORD_TEST_AND_SET (unsigned char, 1)
#define SYNC_LOCK_RELEASE(TYPE, WIDTH)					\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (TYPE *ptr)				\
  {									\
    /* All writes before this point must be seen before we release	\
       the lock itself.  */						\
    __builtin_nds32_msync_all ();					\
    *ptr = 0;								\
  }

SYNC_LOCK_RELEASE (int, 4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char, 1)
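/* Together, __sync_lock_test_and_set_<N> and __sync_lock_release_<N>
   are enough for a simple spinlock:

       static char lock;
       while (__sync_lock_test_and_set (&lock, 1))
	 ;  (spin while the previous value was nonzero)
       ... critical section ...
       __sync_lock_release (&lock);

   The test-and-set loop acquires the lock, and the release routine
   issues "msync all" before clearing it, so writes inside the critical
   section are seen before the lock is observed free.  */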