btrfs: [] on the end of a struct field is a variable length array.
[haiku.git] / headers / private / kernel / smp.h
blob 0da58a0cd3f2af4b9d2dd80771e6fc8ff60aaeee
/*
 * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
8 #ifndef KERNEL_SMP_H
9 #define KERNEL_SMP_H
12 #include <arch/atomic.h>
13 #include <boot/kernel_args.h>
14 #include <kernel.h>
16 #include <KernelExport.h>
18 #include <string.h>
21 struct kernel_args;
// inter-CPU messages
enum {
	SMP_MSG_INVALIDATE_PAGE_RANGE = 0,
	SMP_MSG_INVALIDATE_PAGE_LIST,
	SMP_MSG_USER_INVALIDATE_PAGES,
	SMP_MSG_GLOBAL_INVALIDATE_PAGES,
	SMP_MSG_CPU_HALT,
	SMP_MSG_CALL_FUNCTION,
	SMP_MSG_RESCHEDULE
};
// flags for the smp_send_*_ici() calls
enum {
	SMP_MSG_FLAG_ASYNC		= 0x0,
	SMP_MSG_FLAG_SYNC		= 0x1,
	SMP_MSG_FLAG_FREE_ARG	= 0x2
};
41 typedef void (*smp_call_func)(addr_t data1, int32 currentCPU, addr_t data2, addr_t data3);
43 class CPUSet {
44 public:
45 inline CPUSet();
47 inline void ClearAll();
48 inline void SetAll();
50 inline void SetBit(int32 cpu);
51 inline void ClearBit(int32 cpu);
53 inline void SetBitAtomic(int32 cpu);
54 inline void ClearBitAtomic(int32 cpu);
56 inline bool GetBit(int32 cpu) const;
58 inline bool IsEmpty() const;
60 private:
61 static const int kArraySize = ROUNDUP(SMP_MAX_CPUS, 32) / 32;
63 uint32 fBitmap[kArraySize];
67 #ifdef __cplusplus
68 extern "C" {
69 #endif
71 bool try_acquire_spinlock(spinlock* lock);
73 status_t smp_init(struct kernel_args *args);
74 status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
75 status_t smp_init_post_generic_syscalls(void);
76 bool smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous);
77 void smp_wake_up_non_boot_cpus(void);
78 void smp_cpu_rendezvous(uint32* var);
79 void smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2, addr_t data3,
80 void *data_ptr, uint32 flags);
81 void smp_send_multicast_ici(CPUSet& cpuMask, int32 message, addr_t data,
82 addr_t data2, addr_t data3, void *data_ptr, uint32 flags);
83 void smp_send_broadcast_ici(int32 message, addr_t data, addr_t data2, addr_t data3,
84 void *data_ptr, uint32 flags);
85 void smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
86 addr_t data, addr_t data2, addr_t data3, void *data_ptr, uint32 flags);
88 int32 smp_get_num_cpus(void);
89 void smp_set_num_cpus(int32 numCPUs);
90 int32 smp_get_current_cpu(void);
92 int smp_intercpu_int_handler(int32 cpu);
94 #ifdef __cplusplus
96 #endif
99 inline
100 CPUSet::CPUSet()
102 memset(fBitmap, 0, sizeof(fBitmap));
106 inline void
107 CPUSet::ClearAll()
109 memset(fBitmap, 0, sizeof(fBitmap));
113 inline void
114 CPUSet::SetAll()
116 memset(fBitmap, ~uint8(0), sizeof(fBitmap));
120 inline void
121 CPUSet::SetBit(int32 cpu)
123 int32* element = (int32*)&fBitmap[cpu % kArraySize];
124 *element |= 1u << (cpu / kArraySize);
128 inline void
129 CPUSet::ClearBit(int32 cpu)
131 int32* element = (int32*)&fBitmap[cpu % kArraySize];
132 *element &= ~uint32(1u << (cpu / kArraySize));
136 inline void
137 CPUSet::SetBitAtomic(int32 cpu)
139 int32* element = (int32*)&fBitmap[cpu % kArraySize];
140 atomic_or(element, 1u << (cpu / kArraySize));
144 inline void
145 CPUSet::ClearBitAtomic(int32 cpu)
147 int32* element = (int32*)&fBitmap[cpu % kArraySize];
148 atomic_and(element, ~uint32(1u << (cpu / kArraySize)));
152 inline bool
153 CPUSet::GetBit(int32 cpu) const
155 int32* element = (int32*)&fBitmap[cpu % kArraySize];
156 return ((uint32)atomic_get(element) & (1u << (cpu / kArraySize))) != 0;
160 inline bool
161 CPUSet::IsEmpty() const
163 for (int i = 0; i < kArraySize; i++) {
164 if (fBitmap[i] != 0)
165 return false;
168 return true;
172 // Unless spinlock debug features are enabled, try to inline
173 // {acquire,release}_spinlock().
174 #if !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION
177 static inline bool
178 try_acquire_spinlock_inline(spinlock* lock)
180 return atomic_get_and_set((int32*)lock, 1) == 0;
184 static inline void
185 acquire_spinlock_inline(spinlock* lock)
187 if (try_acquire_spinlock_inline(lock))
188 return;
189 acquire_spinlock(lock);
193 static inline void
194 release_spinlock_inline(spinlock* lock)
196 atomic_set((int32*)lock, 0);
// Redirect the public spinlock API to the inline versions above.
#define try_acquire_spinlock(lock)	try_acquire_spinlock_inline(lock)
#define acquire_spinlock(lock)		acquire_spinlock_inline(lock)
#define release_spinlock(lock)		release_spinlock_inline(lock)
205 static inline bool
206 try_acquire_write_spinlock_inline(rw_spinlock* lock)
208 return atomic_test_and_set(&lock->lock, 1u << 31, 0) == 0;
212 static inline void
213 acquire_write_spinlock_inline(rw_spinlock* lock)
215 if (try_acquire_write_spinlock(lock))
216 return;
217 acquire_write_spinlock(lock);
221 static inline void
222 release_write_spinlock_inline(rw_spinlock* lock)
224 atomic_set(&lock->lock, 0);
228 static inline bool
229 try_acquire_read_spinlock_inline(rw_spinlock* lock)
231 uint32 previous = atomic_add(&lock->lock, 1);
232 return (previous & (1u << 31)) == 0;
236 static inline void
237 acquire_read_spinlock_inline(rw_spinlock* lock)
239 if (try_acquire_read_spinlock(lock))
240 return;
241 acquire_read_spinlock(lock);
245 static inline void
246 release_read_spinlock_inline(rw_spinlock* lock)
248 atomic_add(&lock->lock, -1);
// Redirect the public rw_spinlock API to the inline versions above.
// BUG FIX: try_acquire_write_spinlock previously expanded to itself
// (self-referential macros are not re-expanded by the preprocessor), which
// left calls going to a non-inline function instead of
// try_acquire_write_spinlock_inline().
#define try_acquire_read_spinlock(lock)	try_acquire_read_spinlock_inline(lock)
#define acquire_read_spinlock(lock)		acquire_read_spinlock_inline(lock)
#define release_read_spinlock(lock)		release_read_spinlock_inline(lock)
#define try_acquire_write_spinlock(lock) \
	try_acquire_write_spinlock_inline(lock)
#define acquire_write_spinlock(lock)	acquire_write_spinlock_inline(lock)
#define release_write_spinlock(lock)	release_write_spinlock_inline(lock)
261 static inline bool
262 try_acquire_write_seqlock_inline(seqlock* lock) {
263 bool succeed = try_acquire_spinlock(&lock->lock);
264 if (succeed)
265 atomic_add((int32*)&lock->count, 1);
266 return succeed;
270 static inline void
271 acquire_write_seqlock_inline(seqlock* lock) {
272 acquire_spinlock(&lock->lock);
273 atomic_add((int32*)&lock->count, 1);
277 static inline void
278 release_write_seqlock_inline(seqlock* lock) {
279 atomic_add((int32*)&lock->count, 1);
280 release_spinlock(&lock->lock);
284 static inline uint32
285 acquire_read_seqlock_inline(seqlock* lock) {
286 return atomic_get((int32*)&lock->count);
290 static inline bool
291 release_read_seqlock_inline(seqlock* lock, uint32 count) {
292 uint32 current = atomic_get((int32*)&lock->count);
294 return count % 2 == 0 && current == count;
298 #define try_acquire_write_seqlock(lock) try_acquire_write_seqlock_inline(lock)
299 #define acquire_write_seqlock(lock) acquire_write_seqlock_inline(lock)
300 #define release_write_seqlock(lock) release_write_seqlock_inline(lock)
301 #define acquire_read_seqlock(lock) acquire_read_seqlock_inline(lock)
302 #define release_read_seqlock(lock, count) \
303 release_read_seqlock_inline(lock, count)
306 #endif // !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION
309 #endif /* KERNEL_SMP_H */