/* Per-CPU state table; one slot per possible CPU. */
11 struct cpu cpus
[CONFIG_MAX_CPUS
];
13 /* info passed to another cpu along with a sched ipi */
14 struct sched_ipi_data
{
/* NOTE(review): the struct's field list is missing from this chunk.
 * Later code accesses .flags (bitmask of SCHED_IPI_* task bits) and
 * .data (holds a struct proc * cast to u32_t) — confirm the exact
 * field declarations against the full file. */
/* One request slot per CPU, indexed by the target cpu id. */
19 static struct sched_ipi_data sched_ipi_data
[CONFIG_MAX_CPUS
];
/* Task bits OR-ed into sched_ipi_data[cpu].flags to request work from
 * another CPU; serviced and cleared by smp_sched_handler(). */
21 #define SCHED_IPI_STOP_PROC 1
22 #define SCHED_IPI_VM_INHIBIT 2
23 #define SCHED_IPI_SAVE_CTX 4
/* Number of application processors (APs) that finished booting; the BSP
 * spins on this in wait_for_APs_to_finish_booting(). */
25 static volatile unsigned ap_cpus_booted
;
/* Global kernel spinlocks. */
27 SPINLOCK_DEFINE(big_kernel_lock
)
28 SPINLOCK_DEFINE(boot_lock
)
/*
 * Wait until every application processor has finished booting.
 * Counts the CPUs flagged CPU_IS_READY, prints a warning if fewer than
 * ncpus came up, then spins until ap_cpus_booted reaches that count
 * (minus the boot CPU itself).
 * NOTE(review): several interior lines of this function are missing
 * from this chunk (gaps in the embedded original line numbers, e.g.
 * 31-34, 38-40, 42, 44, 46, 48+) — consult the full file before editing.
 */
30 void wait_for_APs_to_finish_booting(void)
35 /* check how many cpus are actually alive */
36 for (i
= 0 ; i
< ncpus
; i
++) {
37 if (cpu_test_flag(i
, CPU_IS_READY
))
41 printf("WARNING only %d out of %d cpus booted\n", n
, ncpus
);
43 /* we must let the other CPUs to run in kernel mode first */
/* busy-wait: all n-1 APs must report in before we continue */
45 while (ap_cpus_booted
!= (n
- 1))
47 /* now we have to take the lock again as we continue execution */
/*
 * Called by an AP once its boot sequence completes.
 * NOTE(review): only the signature is visible here; the body (original
 * lines 52-54) is missing — presumably it increments ap_cpus_booted,
 * which wait_for_APs_to_finish_booting() spins on. Confirm against the
 * full file.
 */
51 void ap_boot_finished(unsigned cpu
)
/* Handler for the halt IPI. NOTE(review): body (original lines 57+) is
 * missing from this chunk. */
56 void smp_ipi_halt_handler(void)
/*
 * Poke CPU 'cpu' with a scheduling IPI so it re-runs its scheduler.
 * (The one-line body's braces, original lines 64/66, are missing from
 * this chunk.)
 */
63 void smp_schedule(unsigned cpu
)
65 arch_send_smp_schedule_ipi(cpu
);
/* Forward declaration; definition appears further below. */
68 void smp_sched_handler(void);
/*
71 * tell another cpu about a task to do and return only after the cpu acks that
72 * the task is finished. Also wait before it finishes task sent by another cpu
*/
/*
 * NOTE(review): this function's body is incomplete in this chunk — the
 * loop bodies and several statements (e.g. original lines 79-81, 84,
 * 86, 89-96, 99, 101, 103, 106+) are missing. Do not restyle or fix
 * without the full file.
 */
75 static void smp_schedule_sync(struct proc
* p
, unsigned task
)
/* target cpu: the one the process currently runs on */
77 unsigned cpu
= p
->p_cpu
;
78 unsigned mycpu
= cpuid
;
/*
82 * if some other cpu made a request to the same cpu, wait until it is
83 * done before proceeding
*/
85 if (sched_ipi_data
[cpu
].flags
!= 0) {
/* while waiting for the target's slot to drain, also service any
 * request another cpu has posted to OUR slot, to avoid deadlock */
87 while (sched_ipi_data
[cpu
].flags
!= 0) {
88 if (sched_ipi_data
[mycpu
].flags
) {
/* publish the request: the proc pointer, then the task bits */
97 sched_ipi_data
[cpu
].data
= (u32_t
) p
;
98 sched_ipi_data
[cpu
].flags
|= task
;
100 arch_send_smp_schedule_ipi(cpu
);
102 /* wait until the destination cpu finishes its job */
/* destination acks by clearing its flags in smp_sched_handler();
 * again service our own incoming requests while spinning */
104 while (sched_ipi_data
[cpu
].flags
!= 0) {
105 if (sched_ipi_data
[mycpu
].flags
) {
/*
 * Stop process p. If it is runnable, its own cpu must stop it, so a
 * synchronous SCHED_IPI_STOP_PROC request is sent; the assert verifies
 * RTS_PROC_STOP ended up set either way.
 * NOTE(review): original line 118 is missing from this chunk — likely
 * an `else` before the RTS_SET; confirm against the full file.
 */
114 void smp_schedule_stop_proc(struct proc
* p
)
116 if (proc_is_runnable(p
))
117 smp_schedule_sync(p
, SCHED_IPI_STOP_PROC
);
119 RTS_SET(p
, RTS_PROC_STOP
);
120 assert(RTS_ISSET(p
, RTS_PROC_STOP
));
/*
 * Inhibit process p for VM purposes. If runnable, route the request to
 * its own cpu via SCHED_IPI_VM_INHIBIT; the assert verifies
 * RTS_VMINHIBIT ended up set either way.
 * NOTE(review): original line 127 is missing from this chunk — likely
 * an `else` before the RTS_SET; confirm against the full file.
 */
123 void smp_schedule_vminhibit(struct proc
* p
)
125 if (proc_is_runnable(p
))
126 smp_schedule_sync(p
, SCHED_IPI_VM_INHIBIT
);
128 RTS_SET(p
, RTS_VMINHIBIT
);
129 assert(RTS_ISSET(p
, RTS_VMINHIBIT
));
/*
 * Stop process p AND force its complete context (including FPU state)
 * to be saved on its cpu, via a combined synchronous IPI request.
 */
132 void smp_schedule_stop_proc_save_ctx(struct proc
* p
)
/*
135 * stop the processes and force the complete context of the process to
136 * be saved (i.e. including FPU state and such)
*/
138 smp_schedule_sync(p
, SCHED_IPI_STOP_PROC
| SCHED_IPI_SAVE_CTX
);
139 assert(RTS_ISSET(p
, RTS_PROC_STOP
));
/*
 * Migrate process p to dest_cpu: stop it with full context save, then
 * release it so it can be picked up on the new cpu.
 * NOTE(review): the statement that actually assigns dest_cpu to the
 * process (original line 152, presumably p->p_cpu = dest_cpu) is
 * missing from this chunk — confirm against the full file.
 */
142 void smp_schedule_migrate_proc(struct proc
* p
, unsigned dest_cpu
)
/*
145 * stop the processes and force the complete context of the process to
146 * be saved (i.e. including FPU state and such)
*/
148 smp_schedule_sync(p
, SCHED_IPI_STOP_PROC
| SCHED_IPI_SAVE_CTX
);
149 assert(RTS_ISSET(p
, RTS_PROC_STOP
));
151 /* assign the new cpu and let the process run again */
153 RTS_UNSET(p
, RTS_PROC_STOP
);
/*
 * Runs on the cpu targeted by a sched IPI: reads this cpu's request
 * slot, performs the requested SCHED_IPI_* tasks on the proc pointer
 * stored in .data, then clears .flags to acknowledge completion to the
 * requester spinning in smp_schedule_sync().
 * NOTE(review): several interior lines are missing from this chunk
 * (e.g. original 157-158, 160, 162-164, 166, 169, 177-179, 182-185,
 * 187+) — consult the full file before editing.
 */
156 void smp_sched_handler(void)
159 unsigned cpu
= cpuid
;
/* snapshot the task bits posted to this cpu's slot */
161 flgs
= sched_ipi_data
[cpu
].flags
;
/* .data carries the target proc pointer, stored as a u32_t */
165 p
= (struct proc
*)sched_ipi_data
[cpu
].data
;
167 if (flgs
& SCHED_IPI_STOP_PROC
) {
168 RTS_SET(p
, RTS_PROC_STOP
);
170 if (flgs
& SCHED_IPI_SAVE_CTX
) {
171 /* all context has been saved already, FPU remains */
/* flush p's FPU state only if this cpu currently owns it */
172 if (proc_used_fpu(p
) &&
173 get_cpulocal_var(fpu_owner
) == p
) {
174 disable_fpu_exception();
175 save_local_fpu(p
, FALSE
/*retain*/);
176 /* we're preparing to migrate somewhere else */
180 if (flgs
& SCHED_IPI_VM_INHIBIT
) {
181 RTS_SET(p
, RTS_VMINHIBIT
);
/* clearing the flags acks the request to the sender */
186 sched_ipi_data
[cpu
].flags
= 0;
/*
190 * This function gets always called only after smp_sched_handler() has been
191 * already called. It only serves the purpose of acknowledging the IPI and
192 * preempting the current process if the CPU was not idle.
*/
/* NOTE(review): trailing lines of this function (original 195-199,
 * 203+) are missing from this chunk. */
194 void smp_ipi_sched_handler(void)
200 curr
= get_cpulocal_var(proc_ptr
);
/* preempt whatever was running, unless this cpu was idle */
201 if (curr
->p_endpoint
!= IDLE
) {
202 RTS_SET(curr
, RTS_PREEMPTED
);