// ACE/ace/Atomic_Op.cpp
#include "ace/Atomic_Op.h"
#include "ace/OS_NS_unistd.h"

#if !defined (__ACE_INLINE__)
#include "ace/Atomic_Op.inl"
#endif /* __ACE_INLINE__ */

#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)

namespace {
#if defined (_MSC_VER)
// Disable "no return value" warning, as we will be putting
// the return values directly into the EAX register.
#pragma warning (push)
#pragma warning (disable: 4035)
#endif /* _MSC_VER */
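
// The helpers in this unnamed namespace come in single-CPU and multi-CPU
// flavors.  The single_cpu_* versions omit the x86 LOCK prefix: on a
// uniprocessor a single read-modify-write instruction cannot be interleaved
// with another processor, so bus locking is unnecessary overhead.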
long
single_cpu_increment (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

long
single_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,-1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

long
single_cpu_exchange (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

long
single_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("add %0,%1,%2" : "=r" (tmp) : "r" (*value), "r" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0x0F, 0xC1, 0x02); // xadd [edx], eax
  // Return value is already in EAX register.
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

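// The multi_cpu_* variants below perform the same operations but add the
// LOCK prefix (explicitly, or implicitly in the case of XCHG) so the
// read-modify-write is atomic with respect to other processors.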
long
multi_cpu_increment (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

long
multi_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

long
multi_cpu_exchange (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  // The XCHG instruction automatically follows LOCK semantics.
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

long
multi_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      lock xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0xF0, 0x0F, 0xC1, 0x02); // lock xadd [edx], eax
  // Return value is already in EAX register.
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY*/
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY*/
}

#if defined (_MSC_VER)
#pragma warning (pop)
#endif /* _MSC_VER */

} // end namespace

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;
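
// The function pointers default to the multi-CPU (LOCK-prefixed)
// implementations so the operations are safe even before init_functions()
// runs; init_functions() switches to the cheaper single-CPU variants when
// only one processor is detected.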
void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::init_functions ()
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}

void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::dump () const
{
#if defined (ACE_HAS_DUMP)
  ACELIB_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

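// The unsigned long specialization reuses the same long-based helper
// functions; its function pointers are therefore declared with
// volatile long * parameters.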
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;

void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::init_functions ()
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}

void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::dump () const
{
#if defined (ACE_HAS_DUMP)
  ACELIB_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

ACE_END_VERSIONED_NAMESPACE_DECL

#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
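
// A minimal usage sketch (illustrative only, not part of this translation
// unit; it assumes only the public ACE_Atomic_Op interface declared in
// ace/Atomic_Op.h).  The inline operators of this specialization dispatch
// through the function pointers initialized above.
//
//   #include "ace/Atomic_Op.h"
//   #include "ace/Thread_Mutex.h"
//
//   ACE_Atomic_Op<ACE_Thread_Mutex, long> counter (0);
//   ++counter;                        // atomic increment
//   counter += 5;                     // atomic add
//   long const snapshot = counter.value ();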