/*
 * Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef KERNEL_SMP_H
#define KERNEL_SMP_H


#include <arch/atomic.h>
#include <boot/kernel_args.h>
#include <kernel.h>

#include <KernelExport.h>

#include <string.h>

enum {
	SMP_MSG_INVALIDATE_PAGE_RANGE = 0,
	SMP_MSG_INVALIDATE_PAGE_LIST,
	SMP_MSG_USER_INVALIDATE_PAGES,
	SMP_MSG_GLOBAL_INVALIDATE_PAGES,
	SMP_MSG_CALL_FUNCTION
};

enum {
	SMP_MSG_FLAG_ASYNC		= 0x0,
	SMP_MSG_FLAG_SYNC		= 0x1,
	SMP_MSG_FLAG_FREE_ARG	= 0x2,
};
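// As the flag names suggest, SMP_MSG_FLAG_SYNC makes the sender wait until
// all target CPUs have handled the message, while SMP_MSG_FLAG_ASYNC returns
// immediately; SMP_MSG_FLAG_FREE_ARG additionally has the message's data_ptr
// freed once it has been processed everywhere.
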
typedef void (*smp_call_func)(addr_t data1, int32 currentCPU, addr_t data2,
	addr_t data3);
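// Handler type for SMP_MSG_CALL_FUNCTION: the message's data_ptr is expected
// to point to a function of this type, which is then invoked on each target
// CPU with the message's three data words.
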
class CPUSet {
public:
	inline				CPUSet();

	inline	void		ClearAll();
	inline	void		SetAll();

	inline	void		SetBit(int32 cpu);
	inline	void		ClearBit(int32 cpu);

	inline	void		SetBitAtomic(int32 cpu);
	inline	void		ClearBitAtomic(int32 cpu);

	inline	bool		GetBit(int32 cpu) const;

	inline	bool		IsEmpty() const;

private:
	static	const int	kArraySize = ROUNDUP(SMP_MAX_CPUS, 32) / 32;

			uint32		fBitmap[kArraySize];
};

bool try_acquire_spinlock(spinlock* lock);

status_t smp_init(struct kernel_args *args);
status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
status_t smp_init_post_generic_syscalls(void);
bool smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous);
void smp_wake_up_non_boot_cpus(void);
void smp_cpu_rendezvous(uint32* var);
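// The three functions above synchronize CPUs during boot:
// smp_trap_non_boot_cpus() holds every CPU but the boot CPU until
// smp_wake_up_non_boot_cpus() releases them, and smp_cpu_rendezvous() acts as
// a simple barrier on *var that each participating CPU checks in on.
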
void smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2,
	addr_t data3, void *data_ptr, uint32 flags);
void smp_send_multicast_ici(CPUSet& cpuMask, int32 message, addr_t data,
	addr_t data2, addr_t data3, void *data_ptr, uint32 flags);
void smp_send_broadcast_ici(int32 message, addr_t data, addr_t data2,
	addr_t data3, void *data_ptr, uint32 flags);
void smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
	addr_t data, addr_t data2, addr_t data3, void *data_ptr, uint32 flags);
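// For the smp_send_*_ici() senders above: message is one of the SMP_MSG_*
// constants, data/data2/data3 and data_ptr carry the message-specific
// payload, and flags is a combination of the SMP_MSG_FLAG_* values. The
// *_interrupts_disabled variant is for callers that already run with
// interrupts off and therefore pass their own CPU index explicitly.
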
int32 smp_get_num_cpus(void);
void smp_set_num_cpus(int32 numCPUs);
int32 smp_get_current_cpu(void);

int smp_intercpu_int_handler(int32 cpu);

inline
CPUSet::CPUSet()
{
	memset(fBitmap, 0, sizeof(fBitmap));
}


inline void
CPUSet::ClearAll()
{
	memset(fBitmap, 0, sizeof(fBitmap));
}


inline void
CPUSet::SetAll()
{
	memset(fBitmap, ~uint8(0), sizeof(fBitmap));
}
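// The accessors below use a striped layout: CPU n is stored in word
// (n % kArraySize) at bit position (n / kArraySize). The mapping is unusual
// but consistent across all of them, so every CPU index below SMP_MAX_CPUS
// maps to exactly one bit.
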
inline void
CPUSet::SetBit(int32 cpu)
{
	int32* element = (int32*)&fBitmap[cpu % kArraySize];
	*element |= 1u << (cpu / kArraySize);
}

inline void
CPUSet::ClearBit(int32 cpu)
{
	int32* element = (int32*)&fBitmap[cpu % kArraySize];
	*element &= ~uint32(1u << (cpu / kArraySize));
}

inline void
CPUSet::SetBitAtomic(int32 cpu)
{
	int32* element = (int32*)&fBitmap[cpu % kArraySize];
	atomic_or(element, 1u << (cpu / kArraySize));
}

inline void
CPUSet::ClearBitAtomic(int32 cpu)
{
	int32* element = (int32*)&fBitmap[cpu % kArraySize];
	atomic_and(element, ~uint32(1u << (cpu / kArraySize)));
}

inline bool
CPUSet::GetBit(int32 cpu) const
{
	int32* element = (int32*)&fBitmap[cpu % kArraySize];
	return ((uint32)atomic_get(element) & (1u << (cpu / kArraySize))) != 0;
}

inline bool
CPUSet::IsEmpty() const
{
	for (int i = 0; i < kArraySize; i++) {
		if (fBitmap[i] != 0)
			return false;
	}

	return true;
}
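// Note that IsEmpty() reads the bitmap words without atomics; callers that
// race against concurrent SetBitAtomic()/ClearBitAtomic() updates need their
// own synchronization if they want a consistent answer.
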
// Unless spinlock debug features are enabled, try to inline
// {acquire,release}_spinlock().
#if !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION

static inline bool
try_acquire_spinlock_inline(spinlock* lock)
{
	return atomic_get_and_set((int32*)lock, 1) == 0;
}


static inline void
acquire_spinlock_inline(spinlock* lock)
{
	if (try_acquire_spinlock_inline(lock))
		return;
	acquire_spinlock(lock);
}


static inline void
release_spinlock_inline(spinlock* lock)
{
	atomic_set((int32*)lock, 0);
}

#define try_acquire_spinlock(lock)	try_acquire_spinlock_inline(lock)
#define acquire_spinlock(lock)		acquire_spinlock_inline(lock)
#define release_spinlock(lock)		release_spinlock_inline(lock)
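// From this point on, callers of {try_,}acquire_spinlock() and
// release_spinlock() get the inline fast path; the out-of-line
// acquire_spinlock() called inside acquire_spinlock_inline() was resolved
// before the macro took effect and serves as the contended slow path.
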
static inline bool
try_acquire_write_spinlock_inline(rw_spinlock* lock)
{
	return atomic_test_and_set(&lock->lock, 1u << 31, 0) == 0;
}


static inline void
acquire_write_spinlock_inline(rw_spinlock* lock)
{
	if (try_acquire_write_spinlock(lock))
		return;
	acquire_write_spinlock(lock);
}


static inline void
release_write_spinlock_inline(rw_spinlock* lock)
{
	atomic_set(&lock->lock, 0);
}


static inline bool
try_acquire_read_spinlock_inline(rw_spinlock* lock)
{
	uint32 previous = atomic_add(&lock->lock, 1);
	return (previous & (1u << 31)) == 0;
}


static inline void
acquire_read_spinlock_inline(rw_spinlock* lock)
{
	if (try_acquire_read_spinlock(lock))
		return;
	acquire_read_spinlock(lock);
}


static inline void
release_read_spinlock_inline(rw_spinlock* lock)
{
	atomic_add(&lock->lock, -1);
}

#define try_acquire_read_spinlock(lock)	try_acquire_read_spinlock_inline(lock)
#define acquire_read_spinlock(lock)		acquire_read_spinlock_inline(lock)
#define release_read_spinlock(lock)		release_read_spinlock_inline(lock)
#define try_acquire_write_spinlock(lock) \
	try_acquire_write_spinlock_inline(lock)
#define acquire_write_spinlock(lock)	acquire_write_spinlock_inline(lock)
#define release_write_spinlock(lock)	release_write_spinlock_inline(lock)
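// Encoding used by the rw_spinlock operations above: bit 31 of lock->lock is
// the write lock, the lower bits count active readers. A writer acquires by
// swinging the word from 0 to 1 << 31; a reader increments the count and, if
// the write bit was already set, falls back to the out-of-line slow path.
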
static inline bool
try_acquire_write_seqlock_inline(seqlock* lock) {
	bool succeed = try_acquire_spinlock(&lock->lock);
	if (succeed)
		atomic_add((int32*)&lock->count, 1);
	return succeed;
}

static inline void
acquire_write_seqlock_inline(seqlock* lock) {
	acquire_spinlock(&lock->lock);
	atomic_add((int32*)&lock->count, 1);
}

static inline void
release_write_seqlock_inline(seqlock* lock) {
	atomic_add((int32*)&lock->count, 1);
	release_spinlock(&lock->lock);
}

static inline uint32
acquire_read_seqlock_inline(seqlock* lock) {
	return atomic_get((int32*)&lock->count);
}

static inline bool
release_read_seqlock_inline(seqlock* lock, uint32 count) {
	uint32 current = atomic_get((int32*)&lock->count);
	return count % 2 == 0 && current == count;
}
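// The seqlock count is odd while a writer holds the lock and even otherwise,
// so a reader's snapshot is only valid if the count was even and unchanged
// across the read. A typical reader loop (sketch):
//
//	uint32 count;
//	do {
//		count = acquire_read_seqlock(&lock);
//		// ... read the protected data ...
//	} while (!release_read_seqlock(&lock, count));
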
#define try_acquire_write_seqlock(lock)	try_acquire_write_seqlock_inline(lock)
#define acquire_write_seqlock(lock)	acquire_write_seqlock_inline(lock)
#define release_write_seqlock(lock)	release_write_seqlock_inline(lock)
#define acquire_read_seqlock(lock)	acquire_read_seqlock_inline(lock)
#define release_read_seqlock(lock, count) \
	release_read_seqlock_inline(lock, count)

#endif	// !DEBUG_SPINLOCKS && !B_DEBUG_SPINLOCK_CONTENTION


#endif	/* KERNEL_SMP_H */