// ACE/ace/Atomic_Op.cpp
#include "ace/Atomic_Op.h"
#include "ace/OS_NS_unistd.h"

#if !defined (__ACE_INLINE__)
#include "ace/Atomic_Op.inl"
#endif /* __ACE_INLINE__ */

#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)

#if defined (ACE_INCLUDE_ATOMIC_OP_SPARC)
# include "ace/Atomic_Op_Sparc.h"
#endif /* ACE_INCLUDE_ATOMIC_OP_SPARC */
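// ace/Atomic_Op_Sparc.h declares the ace_atomic_*_long() helpers used by
// the SunCC/Solaris branches below.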
namespace
{

#if defined (_MSC_VER)
// Disable "no return value" warning, as we will be putting
// the return values directly into the EAX register.
#pragma warning (push)
#pragma warning (disable: 4035)
#endif /* _MSC_VER */
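// Helper functions implementing the basic atomic operations.  The
// single_cpu_* variants are intended for uniprocessor machines and omit the
// bus-locking LOCK prefix; the multi_cpu_* variants further below use it so
// the read-modify-write is atomic across processors.  Note that XADD leaves
// the *old* value of the memory operand in its register operand, which is
// why the increment/decrement helpers adjust the value they return.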
long
single_cpu_increment (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), 1);
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
long
single_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), -1);
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,-1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
long
single_cpu_exchange (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_swap_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
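// On WIN32 compilers that lack InterlockedExchangeAdd(), exchange-add falls
// back to compiler-specific inline assembly that leaves the result in EAX;
// this is why warning C4035 ("no return value") is suppressed above for MSVC.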
long
single_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_swap_add_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("add %0,%1,%2" : "=r" (tmp) : "r" (*value), "r" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0x0F, 0xC1, 0x02); // xadd [edx], eax
  // Return value is already in EAX register.
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
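// The multi_cpu_* variants mirror the single-CPU ones but prefix XADD with
// LOCK so the read-modify-write is atomic on SMP machines.  XCHG implies
// LOCK semantics on its own, so the exchange variant needs no prefix.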
long
multi_cpu_increment (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), 1);
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
long
multi_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), -1);
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
long
multi_cpu_exchange (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  // The XCHG instruction automatically follows LOCK semantics.
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_swap_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
long
multi_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif !defined (ACE_HAS_SOLARIS_ATOMIC_LIB) && (defined (sun) || \
      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64))))
  return ace_atomic_swap_add_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      lock xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0xF0, 0x0F, 0xC1, 0x02); // lock xadd [edx], eax
  // Return value is already in EAX register.
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
#if defined (_MSC_VER)
#pragma warning (pop)
#endif /* _MSC_VER */

} // end namespace

ACE_BEGIN_VERSIONED_NAMESPACE_DECL
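// The ACE_Atomic_Op<ACE_Thread_Mutex, long> and <ACE_Thread_Mutex, unsigned
// long> specializations dispatch through these function pointers.  They
// default to the multi-CPU implementations, which are always safe;
// init_functions() may later swap in the cheaper single-CPU versions when
// only one processor is present.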
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;
void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::init_functions (void)
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}
void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACELIB_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;
void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::init_functions (void)
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}
void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACELIB_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

ACE_END_VERSIONED_NAMESPACE_DECL

#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
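// A minimal usage sketch (not part of this file; it assumes the
// ACE_Atomic_Op<ACE_Thread_Mutex, long> specialization declared in
// ace/Atomic_Op.h with its usual increment operators and value() accessor):
//
//   ACE_Atomic_Op<ACE_Thread_Mutex, long> counter (0);
//   ++counter;                        // routed through increment_fn_
//   counter += 5;                     // routed through exchange_add_fn_
//   long current = counter.value ();  // read the current value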