/* minix/kernel/smp.c */
#include <assert.h>

#include "smp.h"
#include "interrupt.h"
#include "clock.h"
unsigned ncpus;
unsigned ht_per_core;
unsigned bsp_cpu_id;

struct cpu cpus[CONFIG_MAX_CPUS];
/* info passed to another cpu along with a sched ipi */
struct sched_ipi_data {
        volatile u32_t  flags;
        volatile u32_t  data;
};

static struct sched_ipi_data sched_ipi_data[CONFIG_MAX_CPUS];
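/*
 * Flag values for sched_ipi_data.flags; they encode the action(s) the target
 * cpu has to carry out in smp_sched_handler().
 */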
#define SCHED_IPI_STOP_PROC     1
#define SCHED_IPI_VM_INHIBIT    2
#define SCHED_IPI_SAVE_CTX      4
static volatile unsigned ap_cpus_booted;

SPINLOCK_DEFINE(big_kernel_lock)
SPINLOCK_DEFINE(boot_lock)
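/*
 * Run on the BSP: wait until every application processor (AP) that came up
 * has finished booting. The big kernel lock is released while waiting so the
 * APs can enter kernel mode.
 */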
void wait_for_APs_to_finish_booting(void)
{
        unsigned n = 0;
        int i;

        /* check how many cpus are actually alive */
        for (i = 0 ; i < ncpus ; i++) {
                if (cpu_test_flag(i, CPU_IS_READY))
                        n++;
        }
        if (n != ncpus)
                printf("WARNING only %d out of %d cpus booted\n", n, ncpus);

        /* we must let the other CPUs run in kernel mode first */
        BKL_UNLOCK();
        while (ap_cpus_booted != (n - 1))
                arch_pause();
        /* now we have to take the lock again as we continue execution */
        BKL_LOCK();
}
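/*
 * Called by each AP once it has finished booting; the BSP waits for the
 * resulting count in wait_for_APs_to_finish_booting().
 */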
void ap_boot_finished(unsigned cpu)
{
        ap_cpus_booted++;
}
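/*
 * Handler for the halt IPI: acknowledge the interrupt, stop the local timer
 * and halt this cpu.
 */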
void smp_ipi_halt_handler(void)
{
        ipi_ack();
        stop_local_timer();
        arch_smp_halt_cpu();
}
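/* Ask another cpu to reschedule by sending it a schedule IPI. */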
void smp_schedule(unsigned cpu)
{
        arch_send_smp_schedule_ipi(cpu);
}

void smp_sched_handler(void);
/*
 * Tell another cpu about a task to do and return only after the cpu acks that
 * the task is finished. If another cpu already has a pending task for the
 * same destination cpu, wait until that task is finished before submitting
 * ours.
 */
static void smp_schedule_sync(struct proc * p, unsigned task)
{
        unsigned cpu = p->p_cpu;
        unsigned mycpu = cpuid;

        assert(cpu != mycpu);
        /*
         * if some other cpu made a request to the same cpu, wait until it is
         * done before proceeding
         */
        if (sched_ipi_data[cpu].flags != 0) {
                BKL_UNLOCK();
                while (sched_ipi_data[cpu].flags != 0) {
                        if (sched_ipi_data[mycpu].flags) {
                                BKL_LOCK();
                                smp_sched_handler();
                                BKL_UNLOCK();
                        }
                }
                BKL_LOCK();
        }

        sched_ipi_data[cpu].data = (u32_t) p;
        sched_ipi_data[cpu].flags |= task;
        __insn_barrier();
        arch_send_smp_schedule_ipi(cpu);

        /* wait until the destination cpu finishes its job */
        BKL_UNLOCK();
        while (sched_ipi_data[cpu].flags != 0) {
                if (sched_ipi_data[mycpu].flags) {
                        BKL_LOCK();
                        smp_sched_handler();
                        BKL_UNLOCK();
                }
        }
        BKL_LOCK();
}
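/*
 * Stop a process: if it may be running on another cpu, synchronously ask that
 * cpu to stop it, otherwise just set RTS_PROC_STOP locally.
 */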
void smp_schedule_stop_proc(struct proc * p)
{
        if (proc_is_runnable(p))
                smp_schedule_sync(p, SCHED_IPI_STOP_PROC);
        else
                RTS_SET(p, RTS_PROC_STOP);
        assert(RTS_ISSET(p, RTS_PROC_STOP));
}
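/*
 * Set RTS_VMINHIBIT on a process so it cannot run; if it may be running on
 * another cpu, synchronously ask that cpu to do it.
 */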
void smp_schedule_vminhibit(struct proc * p)
{
        if (proc_is_runnable(p))
                smp_schedule_sync(p, SCHED_IPI_VM_INHIBIT);
        else
                RTS_SET(p, RTS_VMINHIBIT);
        assert(RTS_ISSET(p, RTS_VMINHIBIT));
}
void smp_schedule_stop_proc_save_ctx(struct proc * p)
{
        /*
         * stop the process and force its complete context (i.e. including
         * FPU state and such) to be saved
         */
        smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
        assert(RTS_ISSET(p, RTS_PROC_STOP));
}
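/*
 * Stop a process, have its full context saved and move it to another cpu, then
 * let it run again.
 */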
void smp_schedule_migrate_proc(struct proc * p, unsigned dest_cpu)
{
        /*
         * stop the process and force its complete context (i.e. including
         * FPU state and such) to be saved
         */
        smp_schedule_sync(p, SCHED_IPI_STOP_PROC | SCHED_IPI_SAVE_CTX);
        assert(RTS_ISSET(p, RTS_PROC_STOP));

        /* assign the new cpu and let the process run again */
        p->p_cpu = dest_cpu;
        RTS_UNSET(p, RTS_PROC_STOP);
}
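/*
 * Carry out the action(s) requested in sched_ipi_data[cpuid].flags on this
 * cpu, then clear the flags so the requesting cpu can proceed.
 */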
void smp_sched_handler(void)
{
        unsigned flgs;
        unsigned cpu = cpuid;

        flgs = sched_ipi_data[cpu].flags;

        if (flgs) {
                struct proc * p;
                p = (struct proc *)sched_ipi_data[cpu].data;

                if (flgs & SCHED_IPI_STOP_PROC) {
                        RTS_SET(p, RTS_PROC_STOP);
                }
                if (flgs & SCHED_IPI_SAVE_CTX) {
                        /* all context has been saved already, FPU remains */
                        if (proc_used_fpu(p) &&
                                        get_cpulocal_var(fpu_owner) == p) {
                                disable_fpu_exception();
                                save_local_fpu(p, FALSE /*retain*/);
                                /* we're preparing to migrate somewhere else */
                                release_fpu(p);
                        }
                }
                if (flgs & SCHED_IPI_VM_INHIBIT) {
                        RTS_SET(p, RTS_VMINHIBIT);
                }
        }

        __insn_barrier();
        sched_ipi_data[cpu].flags = 0;
}
/*
 * This function is always called only after smp_sched_handler() has already
 * been called. It only serves the purpose of acknowledging the IPI and
 * preempting the current process if the CPU was not idle.
 */
void smp_ipi_sched_handler(void)
{
        struct proc * curr;

        ipi_ack();

        curr = get_cpulocal_var(proc_ptr);
        if (curr->p_endpoint != IDLE) {
                RTS_SET(curr, RTS_PREEMPTED);
        }
}