src/system/kernel/smp.cpp
1 /*
2 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3 * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
4 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5 * Distributed under the terms of the MIT License.
7 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8 * Distributed under the terms of the NewOS License.
9 */
12 /*! Functionality for symmetrical multi-processors */
15 #include <smp.h>
17 #include <stdlib.h>
18 #include <string.h>
20 #include <arch/atomic.h>
21 #include <arch/cpu.h>
22 #include <arch/debug.h>
23 #include <arch/int.h>
24 #include <arch/smp.h>
25 #include <boot/kernel_args.h>
26 #include <cpu.h>
27 #include <generic_syscall.h>
28 #include <int.h>
29 #include <spinlock_contention.h>
30 #include <thread.h>
31 #include <util/atomic.h>
32 #if DEBUG_SPINLOCK_LATENCIES
33 # include <safemode.h>
34 #endif
36 #include "kernel_debug_config.h"
39 //#define TRACE_SMP
40 #ifdef TRACE_SMP
41 # define TRACE(...) dprintf_no_syslog(__VA_ARGS__)
42 #else
43 # define TRACE(...) (void)0
44 #endif
47 #undef try_acquire_spinlock
48 #undef acquire_spinlock
49 #undef release_spinlock
51 #undef try_acquire_read_spinlock
52 #undef acquire_read_spinlock
53 #undef release_read_spinlock
54 #undef try_acquire_write_spinlock
55 #undef acquire_write_spinlock
56 #undef release_write_spinlock
58 #undef try_acquire_write_seqlock
59 #undef acquire_write_seqlock
60 #undef release_write_seqlock
61 #undef acquire_read_seqlock
62 #undef release_read_seqlock
65 #define MSG_POOL_SIZE (SMP_MAX_CPUS * 4)
67 // These macros define the number of unsuccessful iterations in
68 // acquire_spinlock() and acquire_spinlock_nocheck() after which the functions
69 // panic(), assuming a deadlock.
70 #define SPINLOCK_DEADLOCK_COUNT 100000000
71 #define SPINLOCK_DEADLOCK_COUNT_NO_CHECK 2000000000
74 struct smp_msg {
75 struct smp_msg *next;
76 int32 message;
77 addr_t data;
78 addr_t data2;
79 addr_t data3;
80 void *data_ptr;
81 uint32 flags;
82 int32 ref_count;
83 int32 done;
84 CPUSet proc_bitmap;
87 enum mailbox_source {
88 MAILBOX_LOCAL,
89 MAILBOX_BCAST,
92 static int32 sBootCPUSpin = 0;
94 static int32 sEarlyCPUCallCount;
95 static CPUSet sEarlyCPUCallSet;
96 static void (*sEarlyCPUCallFunction)(void*, int);
97 void* sEarlyCPUCallCookie;
99 static struct smp_msg* sFreeMessages = NULL;
100 static int32 sFreeMessageCount = 0;
101 static spinlock sFreeMessageSpinlock = B_SPINLOCK_INITIALIZER;
103 static struct smp_msg* sCPUMessages[SMP_MAX_CPUS] = { NULL, };
105 static struct smp_msg* sBroadcastMessages = NULL;
106 static spinlock sBroadcastMessageSpinlock = B_SPINLOCK_INITIALIZER;
107 static int32 sBroadcastMessageCounter;
109 static bool sICIEnabled = false;
110 static int32 sNumCPUs = 1;
112 static int32 process_pending_ici(int32 currentCPU);
115 #if DEBUG_SPINLOCKS
116 #define NUM_LAST_CALLERS 32
118 static struct {
119 void *caller;
120 spinlock *lock;
121 } sLastCaller[NUM_LAST_CALLERS];
123 static int32 sLastIndex = 0;
124 // Is incremented atomically. Must be % NUM_LAST_CALLERS before being used
125 	// as index into sLastCaller. Note that it has to be cast to uint32
126 // before applying the modulo operation, since otherwise after overflowing
127 // that would yield negative indices.
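	// Example (illustrative values): once sLastIndex has overflowed to a
	// negative value such as -5, "-5 % NUM_LAST_CALLERS" yields -5, whereas
	// "(uint32)-5 % NUM_LAST_CALLERS" yields 27, i.e. a valid index.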
130 static void
131 push_lock_caller(void* caller, spinlock* lock)
133 int32 index = (uint32)atomic_add(&sLastIndex, 1) % NUM_LAST_CALLERS;
135 sLastCaller[index].caller = caller;
136 sLastCaller[index].lock = lock;
140 static void*
141 find_lock_caller(spinlock* lock)
143 int32 lastIndex = (uint32)atomic_get(&sLastIndex) % NUM_LAST_CALLERS;
145 for (int32 i = 0; i < NUM_LAST_CALLERS; i++) {
146 int32 index = (NUM_LAST_CALLERS + lastIndex - 1 - i) % NUM_LAST_CALLERS;
147 if (sLastCaller[index].lock == lock)
148 return sLastCaller[index].caller;
151 return NULL;
156 dump_spinlock(int argc, char** argv)
158 if (argc != 2) {
159 print_debugger_command_usage(argv[0]);
160 return 0;
163 uint64 address;
164 if (!evaluate_debug_expression(argv[1], &address, false))
165 return 0;
167 spinlock* lock = (spinlock*)(addr_t)address;
168 kprintf("spinlock %p:\n", lock);
169 bool locked = B_SPINLOCK_IS_LOCKED(lock);
170 if (locked) {
171 kprintf(" locked from %p\n", find_lock_caller(lock));
172 } else
173 kprintf(" not locked\n");
175 return 0;
179 #endif // DEBUG_SPINLOCKS
182 #if DEBUG_SPINLOCK_LATENCIES
185 #define NUM_LATENCY_LOCKS 4
186 #define DEBUG_LATENCY 200
189 static struct {
190 spinlock *lock;
191 bigtime_t timestamp;
192 } sLatency[SMP_MAX_CPUS][NUM_LATENCY_LOCKS];
194 static int32 sLatencyIndex[SMP_MAX_CPUS];
195 static bool sEnableLatencyCheck;
198 static void
199 push_latency(spinlock* lock)
201 if (!sEnableLatencyCheck)
202 return;
204 int32 cpu = smp_get_current_cpu();
205 int32 index = (++sLatencyIndex[cpu]) % NUM_LATENCY_LOCKS;
207 sLatency[cpu][index].lock = lock;
208 sLatency[cpu][index].timestamp = system_time();
212 static void
213 test_latency(spinlock* lock)
215 if (!sEnableLatencyCheck)
216 return;
218 int32 cpu = smp_get_current_cpu();
220 for (int32 i = 0; i < NUM_LATENCY_LOCKS; i++) {
221 if (sLatency[cpu][i].lock == lock) {
222 bigtime_t diff = system_time() - sLatency[cpu][i].timestamp;
223 if (diff > DEBUG_LATENCY && diff < 500000) {
224 panic("spinlock %p was held for %lld usecs (%d allowed)\n",
225 lock, diff, DEBUG_LATENCY);
228 sLatency[cpu][i].lock = NULL;
234 #endif // DEBUG_SPINLOCK_LATENCIES
238 dump_ici_messages(int argc, char** argv)
240 // count broadcast messages
241 int32 count = 0;
242 int32 doneCount = 0;
243 int32 unreferencedCount = 0;
244 smp_msg* message = sBroadcastMessages;
245 while (message != NULL) {
246 count++;
247 if (message->done == 1)
248 doneCount++;
249 if (message->ref_count <= 0)
250 unreferencedCount++;
251 message = message->next;
254 kprintf("ICI broadcast messages: %" B_PRId32 ", first: %p\n", count,
255 sBroadcastMessages);
256 kprintf(" done: %" B_PRId32 "\n", doneCount);
257 kprintf(" unreferenced: %" B_PRId32 "\n", unreferencedCount);
259 // count per-CPU messages
260 for (int32 i = 0; i < sNumCPUs; i++) {
261 count = 0;
262 message = sCPUMessages[i];
263 while (message != NULL) {
264 count++;
265 message = message->next;
268 kprintf("CPU %" B_PRId32 " messages: %" B_PRId32 ", first: %p\n", i,
269 count, sCPUMessages[i]);
272 return 0;
277 dump_ici_message(int argc, char** argv)
279 if (argc != 2) {
280 print_debugger_command_usage(argv[0]);
281 return 0;
284 uint64 address;
285 if (!evaluate_debug_expression(argv[1], &address, false))
286 return 0;
288 smp_msg* message = (smp_msg*)(addr_t)address;
289 kprintf("ICI message %p:\n", message);
290 kprintf(" next: %p\n", message->next);
291 kprintf(" message: %" B_PRId32 "\n", message->message);
292 kprintf(" data: 0x%lx\n", message->data);
293 kprintf(" data2: 0x%lx\n", message->data2);
294 kprintf(" data3: 0x%lx\n", message->data3);
295 kprintf(" data_ptr: %p\n", message->data_ptr);
296 kprintf(" flags: %" B_PRIx32 "\n", message->flags);
297 kprintf(" ref_count: %" B_PRIx32 "\n", message->ref_count);
298 kprintf(" done: %s\n", message->done == 1 ? "true" : "false");
300 kprintf(" proc_bitmap: ");
301 for (int32 i = 0; i < sNumCPUs; i++) {
302 if (message->proc_bitmap.GetBit(i))
303 kprintf("%s%" B_PRId32, i != 0 ? ", " : "", i);
305 kprintf("\n");
307 return 0;
311 static inline void
312 process_all_pending_ici(int32 currentCPU)
314 while (process_pending_ici(currentCPU) != B_ENTRY_NOT_FOUND)
319 bool
320 try_acquire_spinlock(spinlock* lock)
322 #if DEBUG_SPINLOCKS
323 if (are_interrupts_enabled()) {
324 panic("try_acquire_spinlock: attempt to acquire lock %p with "
325 "interrupts enabled", lock);
327 #endif
329 #if B_DEBUG_SPINLOCK_CONTENTION
330 if (atomic_add(&lock->lock, 1) != 0)
331 return false;
332 #else
333 if (atomic_get_and_set((int32*)lock, 1) != 0)
334 return false;
336 # if DEBUG_SPINLOCKS
337 push_lock_caller(arch_debug_get_caller(), lock);
338 # endif
339 #endif
341 return true;
345 void
346 acquire_spinlock(spinlock* lock)
348 #if DEBUG_SPINLOCKS
349 if (are_interrupts_enabled()) {
350 panic("acquire_spinlock: attempt to acquire lock %p with interrupts "
351 "enabled", lock);
353 #endif
355 if (sNumCPUs > 1) {
356 int currentCPU = smp_get_current_cpu();
357 #if B_DEBUG_SPINLOCK_CONTENTION
358 while (atomic_add(&lock->lock, 1) != 0)
359 process_all_pending_ici(currentCPU);
360 #else
361 while (1) {
362 uint32 count = 0;
363 while (lock->lock != 0) {
364 if (++count == SPINLOCK_DEADLOCK_COUNT) {
365 # if DEBUG_SPINLOCKS
366 panic("acquire_spinlock(): Failed to acquire spinlock %p "
367 "for a long time (last caller: %p, value: %" B_PRIx32
368 ")", lock, find_lock_caller(lock), lock->lock);
369 # else
370 panic("acquire_spinlock(): Failed to acquire spinlock %p "
371 "for a long time (value: %" B_PRIx32 ")", lock,
372 lock->lock);
373 # endif
374 count = 0;
377 process_all_pending_ici(currentCPU);
378 cpu_wait(&lock->lock, 0);
380 if (atomic_get_and_set(&lock->lock, 1) == 0)
381 break;
384 # if DEBUG_SPINLOCKS
385 push_lock_caller(arch_debug_get_caller(), lock);
386 # endif
387 #endif
388 } else {
389 #if DEBUG_SPINLOCKS
390 int32 oldValue = atomic_get_and_set(&lock->lock, 1);
391 if (oldValue != 0) {
392 panic("acquire_spinlock: attempt to acquire lock %p twice on "
393 "non-SMP system (last caller: %p, value %" B_PRIx32 ")", lock,
394 find_lock_caller(lock), oldValue);
397 push_lock_caller(arch_debug_get_caller(), lock);
398 #endif
400 #if DEBUG_SPINLOCK_LATENCIES
401 push_latency(lock);
402 #endif
406 static void
407 acquire_spinlock_nocheck(spinlock *lock)
409 #if DEBUG_SPINLOCKS
410 if (are_interrupts_enabled()) {
411 panic("acquire_spinlock_nocheck: attempt to acquire lock %p with "
412 "interrupts enabled", lock);
414 #endif
416 if (sNumCPUs > 1) {
417 #if B_DEBUG_SPINLOCK_CONTENTION
418 while (atomic_add(&lock->lock, 1) != 0) {
420 #else
421 while (1) {
422 uint32 count = 0;
423 while (lock->lock != 0) {
424 if (++count == SPINLOCK_DEADLOCK_COUNT_NO_CHECK) {
425 # if DEBUG_SPINLOCKS
426 panic("acquire_spinlock_nocheck(): Failed to acquire "
427 "spinlock %p for a long time (last caller: %p, value: %"
428 B_PRIx32 ")", lock, find_lock_caller(lock), lock->lock);
429 # else
430 panic("acquire_spinlock_nocheck(): Failed to acquire "
431 "spinlock %p for a long time (value: %" B_PRIx32 ")",
432 lock, lock->lock);
433 # endif
434 count = 0;
437 cpu_wait(&lock->lock, 0);
440 if (atomic_get_and_set(&lock->lock, 1) == 0)
441 break;
444 # if DEBUG_SPINLOCKS
445 push_lock_caller(arch_debug_get_caller(), lock);
446 # endif
447 #endif
448 } else {
449 #if DEBUG_SPINLOCKS
450 int32 oldValue = atomic_get_and_set(&lock->lock, 1);
451 if (oldValue != 0) {
452 panic("acquire_spinlock_nocheck: attempt to acquire lock %p twice "
453 "on non-SMP system (last caller: %p, value %" B_PRIx32 ")",
454 lock, find_lock_caller(lock), oldValue);
457 push_lock_caller(arch_debug_get_caller(), lock);
458 #endif
463 /*! Equivalent to acquire_spinlock(), save for the additional currentCPU parameter. */
464 static void
465 acquire_spinlock_cpu(int32 currentCPU, spinlock *lock)
467 #if DEBUG_SPINLOCKS
468 if (are_interrupts_enabled()) {
469 panic("acquire_spinlock_cpu: attempt to acquire lock %p with "
470 "interrupts enabled", lock);
472 #endif
474 if (sNumCPUs > 1) {
475 #if B_DEBUG_SPINLOCK_CONTENTION
476 while (atomic_add(&lock->lock, 1) != 0)
477 process_all_pending_ici(currentCPU);
478 #else
479 while (1) {
480 uint32 count = 0;
481 while (lock->lock != 0) {
482 if (++count == SPINLOCK_DEADLOCK_COUNT) {
483 # if DEBUG_SPINLOCKS
484 panic("acquire_spinlock_cpu(): Failed to acquire spinlock "
485 "%p for a long time (last caller: %p, value: %" B_PRIx32
486 ")", lock, find_lock_caller(lock), lock->lock);
487 # else
488 panic("acquire_spinlock_cpu(): Failed to acquire spinlock "
489 "%p for a long time (value: %" B_PRIx32 ")", lock,
490 lock->lock);
491 # endif
492 count = 0;
495 process_all_pending_ici(currentCPU);
496 cpu_wait(&lock->lock, 0);
498 if (atomic_get_and_set(&lock->lock, 1) == 0)
499 break;
502 # if DEBUG_SPINLOCKS
503 push_lock_caller(arch_debug_get_caller(), lock);
504 # endif
505 #endif
506 } else {
507 #if DEBUG_SPINLOCKS
508 int32 oldValue = atomic_get_and_set(&lock->lock, 1);
509 if (oldValue != 0) {
510 panic("acquire_spinlock_cpu(): attempt to acquire lock %p twice on "
511 "non-SMP system (last caller: %p, value %" B_PRIx32 ")", lock,
512 find_lock_caller(lock), oldValue);
515 push_lock_caller(arch_debug_get_caller(), lock);
516 #endif
521 void
522 release_spinlock(spinlock *lock)
524 #if DEBUG_SPINLOCK_LATENCIES
525 test_latency(lock);
526 #endif
528 if (sNumCPUs > 1) {
529 if (are_interrupts_enabled())
530 panic("release_spinlock: attempt to release lock %p with "
531 "interrupts enabled\n", lock);
532 #if B_DEBUG_SPINLOCK_CONTENTION
534 int32 count = atomic_and(&lock->lock, 0) - 1;
535 if (count < 0) {
536 panic("release_spinlock: lock %p was already released\n", lock);
537 } else {
538 // add to the total count -- deal with carry manually
539 if ((uint32)atomic_add(&lock->count_low, count) + count
540 < (uint32)count) {
541 atomic_add(&lock->count_high, 1);
545 #elif DEBUG_SPINLOCKS
546 if (atomic_get_and_set(&lock->lock, 0) != 1)
547 panic("release_spinlock: lock %p was already released\n", lock);
548 #else
549 atomic_set(&lock->lock, 0);
550 #endif
551 } else {
552 #if DEBUG_SPINLOCKS
553 if (are_interrupts_enabled()) {
554 panic("release_spinlock: attempt to release lock %p with "
555 "interrupts enabled\n", lock);
557 if (atomic_get_and_set(&lock->lock, 0) != 1)
558 panic("release_spinlock: lock %p was already released\n", lock);
559 #endif
560 #if DEBUG_SPINLOCK_LATENCIES
561 test_latency(lock);
562 #endif
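/*	A typical caller sketch (illustrative; "sSomeLock" is a placeholder) --
	interrupts must be disabled while a spinlock is held:

		cpu_status state = disable_interrupts();
		acquire_spinlock(&sSomeLock);
		// ... critical section ...
		release_spinlock(&sSomeLock);
		restore_interrupts(state);

	See find_free_message() below for a real example of this pattern.
*/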
567 bool
568 try_acquire_write_spinlock(rw_spinlock* lock)
570 #if DEBUG_SPINLOCKS
571 if (are_interrupts_enabled()) {
572 panic("try_acquire_write_spinlock: attempt to acquire lock %p with "
573 "interrupts enabled", lock);
576 if (sNumCPUs < 2 && lock->lock != 0) {
577 panic("try_acquire_write_spinlock(): attempt to acquire lock %p twice "
578 "on non-SMP system", lock);
580 #endif
582 return atomic_test_and_set(&lock->lock, 1u << 31, 0) == 0;
586 void
587 acquire_write_spinlock(rw_spinlock* lock)
589 #if DEBUG_SPINLOCKS
590 if (are_interrupts_enabled()) {
591 panic("acquire_write_spinlock: attempt to acquire lock %p with "
592 "interrupts enabled", lock);
594 #endif
596 uint32 count = 0;
597 int currentCPU = smp_get_current_cpu();
598 while (true) {
599 if (try_acquire_write_spinlock(lock))
600 break;
602 while (lock->lock != 0) {
603 if (++count == SPINLOCK_DEADLOCK_COUNT) {
604 panic("acquire_write_spinlock(): Failed to acquire spinlock %p "
605 "for a long time!", lock);
606 count = 0;
609 process_all_pending_ici(currentCPU);
610 cpu_wait(&lock->lock, 0);
616 void
617 release_write_spinlock(rw_spinlock* lock)
619 #if DEBUG_SPINLOCKS
620 uint32 previous = atomic_get_and_set(&lock->lock, 0);
621 if ((previous & 1u << 31) == 0) {
622 panic("release_write_spinlock: lock %p was already released (value: "
623 "%#" B_PRIx32 ")\n", lock, previous);
625 #else
626 atomic_set(&lock->lock, 0);
627 #endif
631 bool
632 try_acquire_read_spinlock(rw_spinlock* lock)
634 #if DEBUG_SPINLOCKS
635 if (are_interrupts_enabled()) {
636 panic("try_acquire_read_spinlock: attempt to acquire lock %p with "
637 "interrupts enabled", lock);
640 if (sNumCPUs < 2 && lock->lock != 0) {
641 panic("try_acquire_read_spinlock(): attempt to acquire lock %p twice "
642 "on non-SMP system", lock);
644 #endif
646 uint32 previous = atomic_add(&lock->lock, 1);
647 return (previous & (1u << 31)) == 0;
651 void
652 acquire_read_spinlock(rw_spinlock* lock)
654 #if DEBUG_SPINLOCKS
655 if (are_interrupts_enabled()) {
656 panic("acquire_read_spinlock: attempt to acquire lock %p with "
657 "interrupts enabled", lock);
659 #endif
661 uint32 count = 0;
662 int currentCPU = smp_get_current_cpu();
663 while (1) {
664 if (try_acquire_read_spinlock(lock))
665 break;
667 while ((lock->lock & (1u << 31)) != 0) {
668 if (++count == SPINLOCK_DEADLOCK_COUNT) {
669 panic("acquire_read_spinlock(): Failed to acquire spinlock %p "
670 "for a long time!", lock);
671 count = 0;
674 process_all_pending_ici(currentCPU);
675 cpu_wait(&lock->lock, 0);
681 void
682 release_read_spinlock(rw_spinlock* lock)
684 #if DEBUG_SPINLOCKS
685 uint32 previous = atomic_add(&lock->lock, -1);
686 if ((previous & 1u << 31) != 0) {
687 panic("release_read_spinlock: lock %p was already released (value:"
688 " %#" B_PRIx32 ")\n", lock, previous);
690 #else
691 atomic_add(&lock->lock, -1);
692 #endif
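/*	Note on the rw_spinlock encoding, as far as it can be read from the code
	above: bit 31 of "lock" marks an active writer, the lower bits serve as a
	reader count. A failed reader attempt leaves its increment in place and
	simply retries; release_write_spinlock() resets the whole value to 0, which
	also discards any increments made by readers while the writer was active.
*/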
697 bool
698 try_acquire_write_seqlock(seqlock* lock) {
699 bool succeed = try_acquire_spinlock(&lock->lock);
700 if (succeed)
701 atomic_add((int32*)&lock->count, 1);
702 return succeed;
706 void
707 acquire_write_seqlock(seqlock* lock) {
708 acquire_spinlock(&lock->lock);
709 atomic_add((int32*)&lock->count, 1);
713 void
714 release_write_seqlock(seqlock* lock) {
715 atomic_add((int32*)&lock->count, 1);
716 release_spinlock(&lock->lock);
720 uint32
721 acquire_read_seqlock(seqlock* lock) {
722 return atomic_get((int32*)&lock->count);
726 bool
727 release_read_seqlock(seqlock* lock, uint32 count) {
728 memory_read_barrier();
730 uint32 current = *(volatile int32*)&lock->count;
732 if (count % 2 == 1 || current != count) {
733 cpu_pause();
734 return false;
737 return true;
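/*	Typical seqlock reader sketch (illustrative; "sSomeSeqlock" and the data
	being read are placeholders) -- retry until the count observed before the
	read is even and unchanged afterwards:

		uint32 count;
		do {
			count = acquire_read_seqlock(&sSomeSeqlock);
			// ... read the protected data ...
		} while (!release_read_seqlock(&sSomeSeqlock, count));
*/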
741 /*! Finds a free message and gets it.
742 	NOTE: this function has the side effect of disabling interrupts;
743 	the return value is the former interrupt state.
745 static cpu_status
746 find_free_message(struct smp_msg** msg)
748 cpu_status state;
750 TRACE("find_free_message: entry\n");
752 retry:
753 while (sFreeMessageCount <= 0)
754 cpu_pause();
756 state = disable_interrupts();
757 acquire_spinlock(&sFreeMessageSpinlock);
759 if (sFreeMessageCount <= 0) {
760 // someone grabbed one while we were getting the lock,
761 // go back to waiting for it
762 release_spinlock(&sFreeMessageSpinlock);
763 restore_interrupts(state);
764 goto retry;
767 *msg = sFreeMessages;
768 sFreeMessages = (*msg)->next;
769 sFreeMessageCount--;
771 release_spinlock(&sFreeMessageSpinlock);
773 TRACE("find_free_message: returning msg %p\n", *msg);
775 return state;
779 /*! Similar to find_free_message(), but expects the interrupts to be disabled
780 already.
782 static void
783 find_free_message_interrupts_disabled(int32 currentCPU,
784 struct smp_msg** _message)
786 TRACE("find_free_message_interrupts_disabled: entry\n");
788 acquire_spinlock_cpu(currentCPU, &sFreeMessageSpinlock);
789 while (sFreeMessageCount <= 0) {
790 release_spinlock(&sFreeMessageSpinlock);
791 process_all_pending_ici(currentCPU);
792 cpu_pause();
793 acquire_spinlock_cpu(currentCPU, &sFreeMessageSpinlock);
796 *_message = sFreeMessages;
797 sFreeMessages = (*_message)->next;
798 sFreeMessageCount--;
800 release_spinlock(&sFreeMessageSpinlock);
802 TRACE("find_free_message_interrupts_disabled: returning msg %p\n",
803 *_message);
807 static void
808 return_free_message(struct smp_msg* msg)
810 TRACE("return_free_message: returning msg %p\n", msg);
812 acquire_spinlock_nocheck(&sFreeMessageSpinlock);
813 msg->next = sFreeMessages;
814 sFreeMessages = msg;
815 sFreeMessageCount++;
816 release_spinlock(&sFreeMessageSpinlock);
820 static struct smp_msg*
821 check_for_message(int currentCPU, mailbox_source& sourceMailbox)
823 if (!sICIEnabled)
824 return NULL;
826 struct smp_msg* msg = atomic_pointer_get(&sCPUMessages[currentCPU]);
827 if (msg != NULL) {
828 do {
829 cpu_pause();
830 msg = atomic_pointer_get(&sCPUMessages[currentCPU]);
831 ASSERT(msg != NULL);
832 } while (atomic_pointer_test_and_set(&sCPUMessages[currentCPU],
833 msg->next, msg) != msg);
835 TRACE(" cpu %d: found msg %p in cpu mailbox\n", currentCPU, msg);
836 sourceMailbox = MAILBOX_LOCAL;
837 } else if (atomic_get(&get_cpu_struct()->ici_counter)
838 != atomic_get(&sBroadcastMessageCounter)) {
840 // try getting one from the broadcast mailbox
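		// (The per-CPU ici_counter only lags sBroadcastMessageCounter while
		// there are broadcast messages this CPU has not yet processed, so the
		// comparison above lets us skip taking sBroadcastMessageSpinlock when
		// there is nothing to do.)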
841 acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
843 msg = sBroadcastMessages;
844 while (msg != NULL) {
845 if (!msg->proc_bitmap.GetBit(currentCPU)) {
846 // we have handled this one already
847 msg = msg->next;
848 continue;
851 			// mark it so we won't try to process this one again
852 msg->proc_bitmap.ClearBitAtomic(currentCPU);
853 atomic_add(&gCPU[currentCPU].ici_counter, 1);
855 sourceMailbox = MAILBOX_BCAST;
856 break;
858 release_spinlock(&sBroadcastMessageSpinlock);
860 if (msg != NULL) {
861 TRACE(" cpu %d: found msg %p in broadcast mailbox\n", currentCPU,
862 msg);
865 return msg;
869 static void
870 finish_message_processing(int currentCPU, struct smp_msg* msg,
871 mailbox_source sourceMailbox)
873 if (atomic_add(&msg->ref_count, -1) != 1)
874 return;
876 // we were the last one to decrement the ref_count
877 // it's our job to remove it from the list & possibly clean it up
879 // clean up the message
880 if (sourceMailbox == MAILBOX_BCAST)
881 acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
883 TRACE("cleaning up message %p\n", msg);
885 if (sourceMailbox != MAILBOX_BCAST) {
886 // local mailbox -- the message has already been removed in
887 // check_for_message()
888 } else if (msg == sBroadcastMessages) {
889 sBroadcastMessages = msg->next;
890 } else {
891 // we need to walk to find the message in the list.
892 // we can't use any data found when previously walking through
893 // the list, since the list may have changed. But, we are guaranteed
894 // to at least have msg in it.
895 struct smp_msg* last = NULL;
896 struct smp_msg* msg1;
898 msg1 = sBroadcastMessages;
899 while (msg1 != NULL && msg1 != msg) {
900 last = msg1;
901 msg1 = msg1->next;
904 // by definition, last must be something
905 if (msg1 == msg && last != NULL)
906 last->next = msg->next;
907 else
908 panic("last == NULL or msg != msg1");
911 if (sourceMailbox == MAILBOX_BCAST)
912 release_spinlock(&sBroadcastMessageSpinlock);
914 if ((msg->flags & SMP_MSG_FLAG_FREE_ARG) != 0 && msg->data_ptr != NULL)
915 free(msg->data_ptr);
917 if ((msg->flags & SMP_MSG_FLAG_SYNC) != 0) {
918 atomic_set(&msg->done, 1);
919 // the caller cpu should now free the message
920 } else {
921 // in the !SYNC case, we get to free the message
922 return_free_message(msg);
927 static status_t
928 process_pending_ici(int32 currentCPU)
930 mailbox_source sourceMailbox;
931 struct smp_msg* msg = check_for_message(currentCPU, sourceMailbox);
932 if (msg == NULL)
933 return B_ENTRY_NOT_FOUND;
935 TRACE(" cpu %ld message = %ld\n", currentCPU, msg->message);
937 bool haltCPU = false;
939 switch (msg->message) {
940 case SMP_MSG_INVALIDATE_PAGE_RANGE:
941 arch_cpu_invalidate_TLB_range(msg->data, msg->data2);
942 break;
943 case SMP_MSG_INVALIDATE_PAGE_LIST:
944 arch_cpu_invalidate_TLB_list((addr_t*)msg->data, (int)msg->data2);
945 break;
946 case SMP_MSG_USER_INVALIDATE_PAGES:
947 arch_cpu_user_TLB_invalidate();
948 break;
949 case SMP_MSG_GLOBAL_INVALIDATE_PAGES:
950 arch_cpu_global_TLB_invalidate();
951 break;
952 case SMP_MSG_CPU_HALT:
953 haltCPU = true;
954 break;
955 case SMP_MSG_CALL_FUNCTION:
957 smp_call_func func = (smp_call_func)msg->data_ptr;
958 func(msg->data, currentCPU, msg->data2, msg->data3);
959 break;
961 case SMP_MSG_RESCHEDULE:
962 scheduler_reschedule_ici();
963 break;
965 default:
966 dprintf("smp_intercpu_int_handler: got unknown message %" B_PRId32 "\n",
967 msg->message);
968 break;
971 // finish dealing with this message, possibly removing it from the list
972 finish_message_processing(currentCPU, msg, sourceMailbox);
974 // special case for the halt message
975 if (haltCPU)
976 debug_trap_cpu_in_kdl(currentCPU, false);
978 return B_OK;
982 #if B_DEBUG_SPINLOCK_CONTENTION
985 static uint64
986 get_spinlock_counter(spinlock* lock)
988 uint32 high;
989 uint32 low;
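	// Re-read the high word until it is unchanged across the read of the low
	// word, so a carry added concurrently by release_spinlock() cannot result
	// in a torn 64 bit value.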
990 do {
991 high = (uint32)atomic_get(&lock->count_high);
992 low = (uint32)atomic_get(&lock->count_low);
993 } while (high != atomic_get(&lock->count_high));
995 return ((uint64)high << 32) | low;
999 static status_t
1000 spinlock_contention_syscall(const char* subsystem, uint32 function,
1001 void* buffer, size_t bufferSize)
1003 spinlock_contention_info info;
1005 if (function != GET_SPINLOCK_CONTENTION_INFO)
1006 return B_BAD_VALUE;
1008 if (bufferSize < sizeof(spinlock_contention_info))
1009 return B_BAD_VALUE;
1011 info.thread_spinlock_counter = get_spinlock_counter(&gThreadSpinlock);
1012 info.team_spinlock_counter = get_spinlock_counter(&gTeamSpinlock);
1014 if (!IS_USER_ADDRESS(buffer)
1015 || user_memcpy(buffer, &info, sizeof(info)) != B_OK) {
1016 return B_BAD_ADDRESS;
1019 return B_OK;
1023 #endif // B_DEBUG_SPINLOCK_CONTENTION
1026 static void
1027 process_early_cpu_call(int32 cpu)
1029 sEarlyCPUCallFunction(sEarlyCPUCallCookie, cpu);
1030 sEarlyCPUCallSet.ClearBitAtomic(cpu);
1031 atomic_add(&sEarlyCPUCallCount, 1);
1035 static void
1036 call_all_cpus_early(void (*function)(void*, int), void* cookie)
1038 if (sNumCPUs > 1) {
1039 sEarlyCPUCallFunction = function;
1040 sEarlyCPUCallCookie = cookie;
1042 atomic_set(&sEarlyCPUCallCount, 1);
1043 sEarlyCPUCallSet.SetAll();
1044 sEarlyCPUCallSet.ClearBit(0);
1046 // wait for all CPUs to finish
1047 while (sEarlyCPUCallCount < sNumCPUs)
1048 cpu_wait(&sEarlyCPUCallCount, sNumCPUs);
1051 function(cookie, 0);
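/*	Note: this early mechanism only works while the non-boot CPUs are still
	spinning in smp_trap_non_boot_cpus(), where they poll sEarlyCPUCallSet and
	run process_early_cpu_call(); call_all_cpus() and call_all_cpus_sync() fall
	back to it as long as sICIEnabled is false.
*/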
1055 // #pragma mark -
1059 smp_intercpu_int_handler(int32 cpu)
1061 TRACE("smp_intercpu_int_handler: entry on cpu %ld\n", cpu);
1063 process_all_pending_ici(cpu);
1065 TRACE("smp_intercpu_int_handler: done on cpu %ld\n", cpu);
1067 return B_HANDLED_INTERRUPT;
1071 void
1072 smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2,
1073 addr_t data3, void* dataPointer, uint32 flags)
1075 struct smp_msg *msg;
1077 TRACE("smp_send_ici: target 0x%lx, mess 0x%lx, data 0x%lx, data2 0x%lx, "
1078 "data3 0x%lx, ptr %p, flags 0x%lx\n", targetCPU, message, data, data2,
1079 data3, dataPointer, flags);
1081 if (sICIEnabled) {
1082 int state;
1083 int currentCPU;
1085 // find_free_message leaves interrupts disabled
1086 state = find_free_message(&msg);
1088 currentCPU = smp_get_current_cpu();
1089 if (targetCPU == currentCPU) {
1090 return_free_message(msg);
1091 restore_interrupts(state);
1092 			return; // nope, can't send an ICI to ourselves
1095 // set up the message
1096 msg->message = message;
1097 msg->data = data;
1098 msg->data2 = data2;
1099 msg->data3 = data3;
1100 msg->data_ptr = dataPointer;
1101 msg->ref_count = 1;
1102 msg->flags = flags;
1103 msg->done = 0;
1105 // stick it in the appropriate cpu's mailbox
1106 struct smp_msg* next;
1107 do {
1108 cpu_pause();
1109 next = atomic_pointer_get(&sCPUMessages[targetCPU]);
1110 msg->next = next;
1111 } while (atomic_pointer_test_and_set(&sCPUMessages[targetCPU], msg,
1112 next) != next);
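		// (lock-free LIFO push -- the matching pop is the
		// atomic_pointer_test_and_set() loop in check_for_message())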
1114 arch_smp_send_ici(targetCPU);
1116 if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
1117 			// wait for the target cpu to finish processing it; for SYNC
1118 			// messages the interrupt handler sets msg->done once it has
1119 			// removed the message from its mailbox and dropped its reference
1120 while (msg->done == 0) {
1121 process_all_pending_ici(currentCPU);
1122 cpu_wait(&msg->done, 1);
1124 // for SYNC messages, it's our responsibility to put it
1125 // back into the free list
1126 return_free_message(msg);
1129 restore_interrupts(state);
1134 void
1135 smp_send_multicast_ici(CPUSet& cpuMask, int32 message, addr_t data,
1136 addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
1138 if (!sICIEnabled)
1139 return;
1141 int currentCPU = smp_get_current_cpu();
1143 // find_free_message leaves interrupts disabled
1144 struct smp_msg *msg;
1145 int state = find_free_message(&msg);
1147 msg->proc_bitmap = cpuMask;
1148 msg->proc_bitmap.ClearBit(currentCPU);
1150 int32 targetCPUs = 0;
1151 for (int32 i = 0; i < sNumCPUs; i++) {
1152 if (msg->proc_bitmap.GetBit(i))
1153 targetCPUs++;
1156 if (targetCPUs == 0) {
1157 panic("smp_send_multicast_ici(): 0 CPU mask");
1158 return;
1161 msg->message = message;
1162 msg->data = data;
1163 msg->data2 = data2;
1164 msg->data3 = data3;
1165 msg->data_ptr = dataPointer;
1166 msg->ref_count = targetCPUs;
1167 msg->flags = flags;
1168 msg->done = 0;
1170 bool broadcast = targetCPUs == sNumCPUs - 1;
1172 // stick it in the broadcast mailbox
1173 acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
1174 msg->next = sBroadcastMessages;
1175 sBroadcastMessages = msg;
1176 release_spinlock(&sBroadcastMessageSpinlock);
1178 atomic_add(&sBroadcastMessageCounter, 1);
1179 for (int32 i = 0; i < sNumCPUs; i++) {
1180 if (!cpuMask.GetBit(i))
1181 atomic_add(&gCPU[i].ici_counter, 1);
1184 if (broadcast)
1185 arch_smp_send_broadcast_ici();
1186 else
1187 arch_smp_send_multicast_ici(cpuMask);
1189 if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
1191 		// wait for the other cpus to finish processing it; for SYNC
1192 		// messages the last cpu to drop its reference sets msg->done
1193 		// after the message has been removed from the broadcast mailbox
1193 while (msg->done == 0) {
1194 process_all_pending_ici(currentCPU);
1195 cpu_wait(&msg->done, 1);
1198 // for SYNC messages, it's our responsibility to put it
1199 // back into the free list
1200 return_free_message(msg);
1203 restore_interrupts(state);
1207 void
1208 smp_send_broadcast_ici(int32 message, addr_t data, addr_t data2, addr_t data3,
1209 void *dataPointer, uint32 flags)
1211 struct smp_msg *msg;
1213 TRACE("smp_send_broadcast_ici: cpu %ld mess 0x%lx, data 0x%lx, data2 "
1214 "0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n", smp_get_current_cpu(),
1215 message, data, data2, data3, dataPointer, flags);
1217 if (sICIEnabled) {
1218 int state;
1219 int currentCPU;
1221 // find_free_message leaves interrupts disabled
1222 state = find_free_message(&msg);
1224 currentCPU = smp_get_current_cpu();
1226 msg->message = message;
1227 msg->data = data;
1228 msg->data2 = data2;
1229 msg->data3 = data3;
1230 msg->data_ptr = dataPointer;
1231 msg->ref_count = sNumCPUs - 1;
1232 msg->flags = flags;
1233 msg->proc_bitmap.SetAll();
1234 msg->proc_bitmap.ClearBit(currentCPU);
1235 msg->done = 0;
1237 		TRACE("smp_send_broadcast_ici %d: inserting msg %p into broadcast "
1238 "mbox\n", currentCPU, msg);
1240 // stick it in the appropriate cpu's mailbox
1241 acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
1242 msg->next = sBroadcastMessages;
1243 sBroadcastMessages = msg;
1244 release_spinlock(&sBroadcastMessageSpinlock);
1246 atomic_add(&sBroadcastMessageCounter, 1);
1247 atomic_add(&gCPU[currentCPU].ici_counter, 1);
1249 arch_smp_send_broadcast_ici();
1251 TRACE("smp_send_broadcast_ici: sent interrupt\n");
1253 if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
1254 			// wait for the other cpus to finish processing it; for SYNC
1255 			// messages the last cpu to drop its reference sets msg->done
1256 			// after the message has been removed from the broadcast mailbox
1257 TRACE("smp_send_broadcast_ici: waiting for ack\n");
1259 while (msg->done == 0) {
1260 process_all_pending_ici(currentCPU);
1261 cpu_wait(&msg->done, 1);
1264 TRACE("smp_send_broadcast_ici: returning message to free list\n");
1266 // for SYNC messages, it's our responsibility to put it
1267 // back into the free list
1268 return_free_message(msg);
1271 restore_interrupts(state);
1274 TRACE("smp_send_broadcast_ici: done\n");
1278 void
1279 smp_send_broadcast_ici_interrupts_disabled(int32 currentCPU, int32 message,
1280 addr_t data, addr_t data2, addr_t data3, void *dataPointer, uint32 flags)
1282 if (!sICIEnabled)
1283 return;
1285 TRACE("smp_send_broadcast_ici_interrupts_disabled: cpu %ld mess 0x%lx, "
1286 "data 0x%lx, data2 0x%lx, data3 0x%lx, ptr %p, flags 0x%lx\n",
1287 currentCPU, message, data, data2, data3, dataPointer, flags);
1289 struct smp_msg *msg;
1290 find_free_message_interrupts_disabled(currentCPU, &msg);
1292 msg->message = message;
1293 msg->data = data;
1294 msg->data2 = data2;
1295 msg->data3 = data3;
1296 msg->data_ptr = dataPointer;
1297 msg->ref_count = sNumCPUs - 1;
1298 msg->flags = flags;
1299 msg->proc_bitmap.SetAll();
1300 msg->proc_bitmap.ClearBit(currentCPU);
1301 msg->done = 0;
1303 TRACE("smp_send_broadcast_ici_interrupts_disabled %ld: inserting msg %p "
1304 "into broadcast mbox\n", currentCPU, msg);
1306 // stick it in the appropriate cpu's mailbox
1307 acquire_spinlock_nocheck(&sBroadcastMessageSpinlock);
1308 msg->next = sBroadcastMessages;
1309 sBroadcastMessages = msg;
1310 release_spinlock(&sBroadcastMessageSpinlock);
1312 atomic_add(&sBroadcastMessageCounter, 1);
1313 atomic_add(&gCPU[currentCPU].ici_counter, 1);
1315 arch_smp_send_broadcast_ici();
1317 TRACE("smp_send_broadcast_ici_interrupts_disabled %ld: sent interrupt\n",
1318 currentCPU);
1320 if ((flags & SMP_MSG_FLAG_SYNC) != 0) {
1321 		// wait for the other cpus to finish processing it; for SYNC
1322 		// messages the last cpu to drop its reference sets msg->done
1323 		// after the message has been removed from the broadcast mailbox
1324 TRACE("smp_send_broadcast_ici_interrupts_disabled %ld: waiting for "
1325 "ack\n", currentCPU);
1327 while (msg->done == 0) {
1328 process_all_pending_ici(currentCPU);
1329 cpu_wait(&msg->done, 1);
1332 TRACE("smp_send_broadcast_ici_interrupts_disabled %ld: returning "
1333 "message to free list\n", currentCPU);
1335 // for SYNC messages, it's our responsibility to put it
1336 // back into the free list
1337 return_free_message(msg);
1340 TRACE("smp_send_broadcast_ici_interrupts_disabled: done\n");
1344 /*! Spin on non-boot CPUs until smp_wake_up_non_boot_cpus() has been called.
1346 \param cpu The index of the calling CPU.
1347 \param rendezVous A rendez-vous variable to make sure that the boot CPU
1348 does not return before all other CPUs have started waiting.
1349 \return \c true on the boot CPU, \c false otherwise.
1351 bool
1352 smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous)
1354 if (cpu == 0) {
1355 smp_cpu_rendezvous(rendezVous);
1356 return true;
1359 smp_cpu_rendezvous(rendezVous);
1361 while (sBootCPUSpin == 0) {
1362 if (sEarlyCPUCallSet.GetBit(cpu))
1363 process_early_cpu_call(cpu);
1365 cpu_pause();
1368 return false;
1372 void
1373 smp_wake_up_non_boot_cpus()
1375 // ICIs were previously being ignored
1376 if (sNumCPUs > 1)
1377 sICIEnabled = true;
1379 // resume non boot CPUs
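	// (they observe sBootCPUSpin becoming non-zero in smp_trap_non_boot_cpus()
	// and leave their spin loop)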
1380 atomic_set(&sBootCPUSpin, 1);
1384 /*! Spin until all CPUs have reached the rendez-vous point.
1386 The rendez-vous variable \c *var must have been initialized to 0 before the
1387 	function is called. The variable will be non-zero when the function returns.
1389 	Note that when the function returns on one CPU, it only means that all CPUs
1390 	have already entered the function. It does not mean that the variable can
1391 	already be reset. Only once all CPUs have returned (which would have to be
1392 	ensured via another rendez-vous) may the variable be reset.
1394 void
1395 smp_cpu_rendezvous(uint32* var)
1397 atomic_add((int32*)var, 1);
1399 while (*var < (uint32)sNumCPUs)
1400 cpu_wait((int32*)var, sNumCPUs);
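/*	Usage sketch (illustrative; the variables are placeholders): to be able to
	reset a rendez-vous variable, a second rendez-vous is needed, as described
	above:

		smp_cpu_rendezvous(&rendezVous1);
		// ... all CPUs have arrived here ...
		smp_cpu_rendezvous(&rendezVous2);
		// only now may rendezVous1 safely be reset to 0
*/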
1404 status_t
1405 smp_init(kernel_args* args)
1407 TRACE("smp_init: entry\n");
1409 #if DEBUG_SPINLOCK_LATENCIES
1410 sEnableLatencyCheck
1411 = !get_safemode_boolean(B_SAFEMODE_DISABLE_LATENCY_CHECK, false);
1412 #endif
1414 #if DEBUG_SPINLOCKS
1415 add_debugger_command_etc("spinlock", &dump_spinlock,
1416 "Dump info on a spinlock",
1417 "\n"
1418 "Dumps info on a spinlock.\n", 0);
1419 #endif
1420 add_debugger_command_etc("ici", &dump_ici_messages,
1421 "Dump info on pending ICI messages",
1422 "\n"
1423 "Dumps info on pending ICI messages.\n", 0);
1424 add_debugger_command_etc("ici_message", &dump_ici_message,
1425 "Dump info on an ICI message",
1426 "\n"
1427 "Dumps info on an ICI message.\n", 0);
1429 if (args->num_cpus > 1) {
1430 sFreeMessages = NULL;
1431 sFreeMessageCount = 0;
1432 for (int i = 0; i < MSG_POOL_SIZE; i++) {
1433 struct smp_msg* msg
1434 = (struct smp_msg*)malloc(sizeof(struct smp_msg));
1435 if (msg == NULL) {
1436 panic("error creating smp mailboxes\n");
1437 return B_ERROR;
1439 memset(msg, 0, sizeof(struct smp_msg));
1440 msg->next = sFreeMessages;
1441 sFreeMessages = msg;
1442 sFreeMessageCount++;
1444 sNumCPUs = args->num_cpus;
1446 TRACE("smp_init: calling arch_smp_init\n");
1448 return arch_smp_init(args);
1452 status_t
1453 smp_per_cpu_init(kernel_args* args, int32 cpu)
1455 return arch_smp_per_cpu_init(args, cpu);
1459 status_t
1460 smp_init_post_generic_syscalls(void)
1462 #if B_DEBUG_SPINLOCK_CONTENTION
1463 return register_generic_syscall(SPINLOCK_CONTENTION,
1464 &spinlock_contention_syscall, 0, 0);
1465 #else
1466 return B_OK;
1467 #endif
1471 void
1472 smp_set_num_cpus(int32 numCPUs)
1474 sNumCPUs = numCPUs;
1478 int32
1479 smp_get_num_cpus()
1481 return sNumCPUs;
1485 int32
1486 smp_get_current_cpu(void)
1488 return thread_get_current_thread()->cpu->cpu_num;
1492 // #pragma mark - public exported functions
1495 void
1496 call_all_cpus(void (*func)(void*, int), void* cookie)
1498 cpu_status state = disable_interrupts();
1500 // if inter-CPU communication is not yet enabled, use the early mechanism
1501 if (!sICIEnabled) {
1502 call_all_cpus_early(func, cookie);
1503 restore_interrupts(state);
1504 return;
1507 if (smp_get_num_cpus() > 1) {
1508 smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)cookie,
1509 0, 0, (void*)func, SMP_MSG_FLAG_ASYNC);
1512 // we need to call this function ourselves as well
1513 func(cookie, smp_get_current_cpu());
1515 restore_interrupts(state);
1519 void
1520 call_all_cpus_sync(void (*func)(void*, int), void* cookie)
1522 cpu_status state = disable_interrupts();
1524 // if inter-CPU communication is not yet enabled, use the early mechanism
1525 if (!sICIEnabled) {
1526 call_all_cpus_early(func, cookie);
1527 restore_interrupts(state);
1528 return;
1531 if (smp_get_num_cpus() > 1) {
1532 smp_send_broadcast_ici(SMP_MSG_CALL_FUNCTION, (addr_t)cookie,
1533 0, 0, (void*)func, SMP_MSG_FLAG_SYNC);
1536 // we need to call this function ourselves as well
1537 func(cookie, smp_get_current_cpu());
1539 restore_interrupts(state);
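/*	Example sketch (hypothetical callback name): run a function on every CPU,
	including the current one, and wait until all of them have executed it:

		static void
		flush_per_cpu_state(void* cookie, int cpu)
		{
			// ... per-CPU work ...
		}

		call_all_cpus_sync(&flush_per_cpu_state, NULL);
*/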
1543 #undef memory_read_barrier
1544 #undef memory_write_barrier
1547 void
1548 memory_read_barrier()
1550 memory_read_barrier_inline();
1554 void
1555 memory_write_barrier()
1557 memory_write_barrier_inline();