1 #include "ace/Atomic_Op.h"
2 #include "ace/OS_NS_unistd.h"
4 #if !defined (__ACE_INLINE__)
5 #include "ace/Atomic_Op.inl"
6 #endif /* __ACE_INLINE__ */

#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)

namespace {
#if defined (_MSC_VER)
// Disable "no return value" warning, as we will be putting
// the return values directly into the EAX register.
#pragma warning (push)
#pragma warning (disable: 4035)
#endif /* _MSC_VER */

long
single_cpu_increment (volatile long *value)
{
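  // Note: in the IA-32 path below, xadd exchanges the register operand
  // with the memory operand and then stores their sum in memory, so tmp
  // ends up holding the value *value had before the add; tmp + 1 is the
  // freshly incremented value. The single_cpu_* variants omit the LOCK
  // prefix: on a uniprocessor a single read-modify-write instruction
  // cannot be interleaved with another CPU's access, so the unlocked
  // form is safe and cheaper.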
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
single_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,-1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
single_cpu_exchange (volatile long *value, long rhs)
{
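  // Note: in the IA-32 path below, xchg atomically swaps rhs with
  // *value (xchg with a memory operand is implicitly locked); the old
  // contents of *value come back in rhs and are returned to the caller,
  // which is exactly the exchange semantics the caller expects.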
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
single_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined(__GNUC__) && defined(__PPC__)
  long tmp;
  asm("add %0,%1,%2" : "=r" (tmp) : "r" (*value), "r" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0x0F, 0xC1, 0x02); // xadd [edx], eax
  // Return value is already in EAX register.
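  // Note: __emit__ injects raw opcode bytes into the instruction
  // stream; 0x0F 0xC1 with ModRM byte 0x02 encodes "xadd [edx], eax",
  // presumably because Borland's inline assembler of that era could not
  // express xadd directly.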
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
multi_cpu_increment (volatile long *value)
{
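  // Note: the multi_cpu_* variants mirror the single-CPU ones but add
  // the LOCK prefix, which holds the bus/cache line for the duration of
  // the read-modify-write so the update is atomic across processors.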
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
multi_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
multi_cpu_exchange (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  // The XCHG instruction automatically follows LOCK semantics.
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
multi_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      lock xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0xF0, 0x0F, 0xC1, 0x02); // lock xadd [edx], eax
  // Return value is already in EAX register.
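  // Note: the extra leading 0xF0 byte is the LOCK prefix, so this emits
  // the bus-locked form of the same xadd encoding used in
  // single_cpu_exchange_add above.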
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

#if defined (_MSC_VER)
#pragma warning (pop)
#endif /* _MSC_VER */

} // unnamed namespace
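
// Design note: when ACE_HAS_BUILTIN_ATOMIC_OP is defined,
// ACE_Atomic_Op<ACE_Thread_Mutex, long> bypasses its mutex and
// dispatches through the function pointers defined below, selecting
// between the locked and unlocked helpers at runtime.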

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;
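
// Note: the pointers above default to the multi-CPU versions so the
// class is safe to use before init_functions () has run (for example
// during static initialization); init_functions () only downgrades to
// the cheaper unlocked forms when a single processor is detected.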

void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::init_functions ()
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}

void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::dump () const
{
#if defined (ACE_HAS_DUMP)
  ACELIB_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;
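
// Note: the unsigned long specialization reuses the signed helpers;
// xadd and xchg operate on raw 32-bit cells, so signedness only
// matters at the C++ type level.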

void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::init_functions ()
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}

void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::dump () const
{
#if defined (ACE_HAS_DUMP)
  ACELIB_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACELIB_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

ACE_END_VERSIONED_NAMESPACE_DECL

#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */