#include "fs.h"
#include <assert.h>

static void *worker_main(void *arg);
static void worker_sleep(void);
static void worker_wake(struct worker_thread *worker);

static mthread_attr_t tattr;
static unsigned int pending;
static unsigned int busy;
static int block_all;

#if defined(_MINIX_MAGIC)
# define TH_STACKSIZE (64 * 1024)
#elif defined(MKCOVERAGE)
# define TH_STACKSIZE (40 * 1024)
#else
# define TH_STACKSIZE (28 * 1024)
#endif

#define ASSERTW(w) assert((w) >= &workers[0] && (w) < &workers[NR_WTHREADS])
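
/* ASSERTW() is a sanity check: it verifies that a worker pointer actually
 * refers to one of the NR_WTHREADS slots of the static workers[] array.
 */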

/*===========================================================================*
 *				worker_init				     *
 *===========================================================================*/
void worker_init(void)
{
/* Initialize worker threads */
  struct worker_thread *wp;
  int i;

  if (mthread_attr_init(&tattr) != 0)
	panic("failed to initialize attribute");
  if (mthread_attr_setstacksize(&tattr, TH_STACKSIZE) != 0)
	panic("couldn't set default thread stack size");

  for (i = 0; i < NR_WTHREADS; i++) {
	wp = &workers[i];

	wp->w_fp = NULL;	/* Mark not in use */
	if (mutex_init(&wp->w_event_mutex, NULL) != 0)
		panic("failed to initialize mutex");
	if (cond_init(&wp->w_event, NULL) != 0)
		panic("failed to initialize condition variable");
	if (mthread_create(&wp->w_tid, &tattr, worker_main, (void *) wp) != 0)
		panic("unable to start thread");
  }

  /* Let all threads get ready to accept work. */
  worker_yield();
}
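
/* Note on the worker_yield() call above: it yields to all newly created
 * threads, presumably so that each of them can run up to its first
 * worker_sleep() before the main thread starts handing out work.
 */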

/*===========================================================================*
 *				worker_cleanup				     *
 *===========================================================================*/
void worker_cleanup(void)
{
/* Clean up worker threads, reversing the actions of worker_init() such that
 * we can safely call worker_init() again later. All worker threads are
 * expected to be idle already. Used for live updates, because transferring
 * the thread stacks from one version to another is currently not feasible.
 */
  struct worker_thread *wp;
  int i;

  assert(worker_idle());

  /* First terminate all threads. */
  for (i = 0; i < NR_WTHREADS; i++) {
	wp = &workers[i];

	assert(wp->w_fp == NULL);

	/* Waking up the thread with no w_fp will cause it to exit. */
	worker_wake(wp);
  }

  /* Then clean up their resources. */
  for (i = 0; i < NR_WTHREADS; i++) {
	wp = &workers[i];

	if (mthread_join(wp->w_tid, NULL) != 0)
		panic("worker_cleanup: could not join thread %d", i);
	if (cond_destroy(&wp->w_event) != 0)
		panic("failed to destroy condition variable");
	if (mutex_destroy(&wp->w_event_mutex) != 0)
		panic("failed to destroy mutex");
  }

  /* Finally, clean up global resources. */
  if (mthread_attr_destroy(&tattr) != 0)
	panic("failed to destroy attribute");

  memset(workers, 0, sizeof(workers));
}
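
/* Zeroing the workers array is safe at this point: all threads have been
 * joined above, so no thread can still be using its worker slot, and a later
 * worker_init() starts again from a clean state.
 */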

/*===========================================================================*
 *				worker_idle				     *
 *===========================================================================*/
int worker_idle(void)
{
/* Return whether all worker threads are idle. */

  return (pending == 0 && busy == 0);
}

/*===========================================================================*
 *				worker_assign				     *
 *===========================================================================*/
static void worker_assign(struct fproc *rfp)
{
/* Assign the work for the given process to a free thread. The caller must
 * ensure that there is in fact at least one free thread.
 */
  struct worker_thread *worker;
  int i;

  /* Find a free worker thread. */
  for (i = 0; i < NR_WTHREADS; i++) {
	worker = &workers[i];

	if (worker->w_fp == NULL)
		break;
  }
  assert(worker != NULL);

  /* Assign work to it. */
  rfp->fp_worker = worker;
  worker->w_fp = rfp;
  busy++;

  worker_wake(worker);
}
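
/* The worker_wake() call above signals the assigned thread's condition
 * variable; the thread itself is blocked in worker_sleep(), which it entered
 * through worker_get_work() in its main loop.
 */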

/*===========================================================================*
 *			worker_may_do_pending				     *
 *===========================================================================*/
static int worker_may_do_pending(void)
{
/* Return whether there is a free thread that may do pending work. This is true
 * only if there is pending work at all, and there is a free non-spare thread
 * (the spare thread is never used for pending work), and VFS is currently
 * processing new requests at all (this may not be true during initialization).
 */

  /* Ordered by likelihood to be false. */
  return (pending > 0 && worker_available() > 1 && !block_all);
}
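
/* The "worker_available() > 1" test (rather than "> 0") is what keeps the
 * spare thread out of reach for pending work, reserving it for deadlock
 * resolution as described above.
 */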

/*===========================================================================*
 *				worker_allow				     *
 *===========================================================================*/
void worker_allow(int allow)
{
/* Allow or disallow workers to process new work. If disallowed, any new work
 * will be stored as pending, even when there are free worker threads. There is
 * no facility to stop active workers. To be used only during initialization!
 */
  struct fproc *rfp;

  block_all = !allow;

  if (!worker_may_do_pending())
	return;

  /* Assign any pending work to workers. */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	if (rfp->fp_flags & FP_PENDING) {
		rfp->fp_flags &= ~FP_PENDING;	/* No longer pending */
		assert(pending > 0);
		pending--;

		worker_assign(rfp);

		if (!worker_may_do_pending())
			return;
	}
  }
}

/*===========================================================================*
 *				worker_get_work				     *
 *===========================================================================*/
static int worker_get_work(void)
{
/* Find new work to do. Work can be 'queued', 'pending', or absent. In the
 * latter case wait for new work to come in. Return TRUE if there is work to
 * do, or FALSE if the current thread is requested to shut down.
 */
  struct fproc *rfp;

  assert(self->w_fp == NULL);

  /* Is there pending work, and should we do it? */
  if (worker_may_do_pending()) {
	/* Find pending work */
	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
		if (rfp->fp_flags & FP_PENDING) {
			self->w_fp = rfp;
			rfp->fp_worker = self;
			busy++;

			rfp->fp_flags &= ~FP_PENDING;	/* No longer pending */
			assert(pending > 0);
			pending--;

			return TRUE;
		}
	}

	panic("Pending work inconsistency");
  }

  /* Wait for work to come to us */
  worker_sleep();

  return (self->w_fp != NULL);
}
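
/* Three outcomes are possible here: the thread claims pending work itself (the
 * loop above), it is handed queued work directly by worker_assign() and then
 * woken up, or it is woken up with w_fp still NULL, which worker_cleanup()
 * uses as the signal to shut the thread down.
 */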

/*===========================================================================*
 *				worker_available			     *
 *===========================================================================*/
int worker_available(void)
{
/* Return the number of threads that are available, including the spare thread.
 */

  return(NR_WTHREADS - busy);
}

/*===========================================================================*
 *				worker_main				     *
 *===========================================================================*/
static void *worker_main(void *arg)
{
/* Worker thread main loop */

  self = (struct worker_thread *) arg;

  while (worker_get_work()) {

	fp = self->w_fp;
	assert(fp->fp_worker == self);

	/* Lock the process. */
	lock_proc(fp);

	/* The following two blocks could be run in a loop until both the
	 * conditions are no longer met, but it is currently impossible that
	 * more normal work is present after postponed PM work has been done.
	 */

	/* Perform normal work, if any. */
	if (fp->fp_func != NULL) {
		self->w_m_in = fp->fp_msg;
		err_code = OK;

		fp->fp_func();

		fp->fp_func = NULL;	/* deliberately unset AFTER the call */
	}

	/* Perform postponed PM work, if any. */
	if (fp->fp_flags & FP_PM_WORK) {
		self->w_m_in = fp->fp_pm_msg;

		service_pm_postponed();

		fp->fp_flags &= ~FP_PM_WORK;
	}

	/* Perform cleanup actions. */
	thread_cleanup();

	unlock_proc(fp);

	fp->fp_worker = NULL;
	fp = NULL;
	self->w_fp = NULL;
	assert(busy > 0);
	busy--;
  }

  return(NULL);
}
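
/* The loop ends when worker_get_work() returns FALSE, i.e. when the thread was
 * woken up without an assigned process. The thread then returns from its main
 * function and is collected by the mthread_join() call in worker_cleanup().
 */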

/*===========================================================================*
 *				worker_can_start			     *
 *===========================================================================*/
int worker_can_start(struct fproc *rfp)
{
/* Return whether normal (non-PM) work can be started for the given process.
 * This function is used to serialize invocation of "special" procedures, and
 * not entirely safe for other cases, as explained in the comments below.
 */
  int is_pending, is_active, has_normal_work;

  is_pending = (rfp->fp_flags & FP_PENDING);
  is_active = (rfp->fp_worker != NULL);
  has_normal_work = (rfp->fp_func != NULL);

  /* If there is no work scheduled for the process, we can start work. */
  if (!is_pending && !is_active) return TRUE;

  /* If there is already normal work scheduled for the process, we cannot add
   * more, since we support only one normal job per process.
   */
  if (has_normal_work) return FALSE;

  /* If this process has pending PM work but no normal work, we can add the
   * normal work for execution before the worker will start.
   */
  if (is_pending) return TRUE;

  /* However, if a worker is active for PM work, we cannot add normal work
   * either, because the work will not be considered. For this reason, we can
   * not use this function for processes that can possibly get postponed PM
   * work. It is still safe for core system processes, though.
   */
  return FALSE;
}

/*===========================================================================*
 *			worker_try_activate				     *
 *===========================================================================*/
static void worker_try_activate(struct fproc *rfp, int use_spare)
{
/* See if we can wake up a thread to do the work scheduled for the given
 * process. If not, mark the process as having pending work for later.
 */
  int needed;

  /* Use the last available thread only if requested. Otherwise, leave at least
   * one spare thread for deadlock resolution.
   */
  needed = use_spare ? 1 : 2;

  /* Also make sure that doing new work is allowed at all right now, which may
   * not be the case during VFS initialization. We do always allow callback
   * calls, i.e., calls that may use the spare thread. The reason is that we do
   * not support callback calls being marked as pending, so the (entirely
   * theoretical) exception here may (entirely theoretically) avoid deadlocks.
   */
  if (needed <= worker_available() && (!block_all || use_spare)) {
	worker_assign(rfp);
  } else {
	rfp->fp_flags |= FP_PENDING;
	pending++;
  }
}
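
/* Work marked FP_PENDING here is picked up later either by worker_allow()
 * (once new work is allowed again) or by a worker calling worker_get_work()
 * (once a non-spare thread becomes free).
 */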

/*===========================================================================*
 *				worker_start				     *
 *===========================================================================*/
void worker_start(struct fproc *rfp, void (*func)(void), message *m_ptr,
	int use_spare)
{
/* Schedule work to be done by a worker thread. The work is bound to the given
 * process. If a function pointer is given, the work is considered normal work,
 * and the function will be called to handle it. If the function pointer is
 * NULL, the work is considered postponed PM work, and service_pm_postponed
 * will be called to handle it. The input message will be a copy of the given
 * message. Optionally, the last spare (deadlock-resolving) thread may be used
 * to execute the work immediately.
 */
  int is_pm_work, is_pending, is_active, has_normal_work, has_pm_work;

  is_pm_work = (func == NULL);
  is_pending = (rfp->fp_flags & FP_PENDING);
  is_active = (rfp->fp_worker != NULL);
  has_normal_work = (rfp->fp_func != NULL);
  has_pm_work = (rfp->fp_flags & FP_PM_WORK);

  /* Sanity checks. If any of these trigger, someone messed up badly! */
  if (is_pending || is_active) {
	if (is_pending && is_active)
		panic("work cannot be both pending and active");

	/* The process cannot make more than one call at once. */
	if (!is_pm_work && has_normal_work)
		panic("process has two calls (%x, %x)",
			rfp->fp_msg.m_type, m_ptr->m_type);

	/* PM will not send more than one job per process to us at once. */
	if (is_pm_work && has_pm_work)
		panic("got two calls from PM (%x, %x)",
			rfp->fp_pm_msg.m_type, m_ptr->m_type);

	/* Despite PM's sys_delay_stop() system, it is possible that normal
	 * work (in particular, do_pending_pipe) arrives after postponed PM
	 * work has been scheduled for execution, so we don't check for that.
	 */
#if 0
	printf("VFS: adding %s work to %s thread\n",
		is_pm_work ? "PM" : "normal",
		is_pending ? "pending" : "active");
#endif
  } else {
	/* Some cleanup step forgotten somewhere? */
	if (has_normal_work || has_pm_work)
		panic("worker administration error");
  }

  /* Save the work to be performed. */
  if (!is_pm_work) {
	rfp->fp_msg = *m_ptr;
	rfp->fp_func = func;
  } else {
	rfp->fp_pm_msg = *m_ptr;
	rfp->fp_flags |= FP_PM_WORK;
  }

  /* If we have not only added to existing work, go look for a free thread.
   * Note that we won't be using the spare thread for normal work if there is
   * already PM work pending, but that situation will never occur in practice.
   */
  if (!is_pending && !is_active)
	worker_try_activate(rfp, use_spare);
}
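
/* Illustrative only (identifiers assumed, not taken from this file): a handler
 * for a regular system call might schedule work as
 *	worker_start(rfp, do_work, &m_in, FALSE);
 * while postponed PM work would be queued as
 *	worker_start(rfp, NULL, &m_in, FALSE);
 * so that worker_main() later picks it up and calls service_pm_postponed().
 */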

/*===========================================================================*
 *				worker_yield				     *
 *===========================================================================*/
void worker_yield(void)
{
/* Yield to all worker threads. To be called from the main thread only. */

  mthread_yield_all();

  self = NULL;
}

/*===========================================================================*
 *				worker_sleep				     *
 *===========================================================================*/
static void worker_sleep(void)
{
  struct worker_thread *worker = self;

  ASSERTW(worker);
  if (mutex_lock(&worker->w_event_mutex) != 0)
	panic("unable to lock event mutex");
  if (cond_wait(&worker->w_event, &worker->w_event_mutex) != 0)
	panic("could not wait on condition variable");
  if (mutex_unlock(&worker->w_event_mutex) != 0)
	panic("unable to unlock event mutex");
  self = worker;
}

/*===========================================================================*
 *				worker_wake				     *
 *===========================================================================*/
static void worker_wake(struct worker_thread *worker)
{
/* Signal a worker to wake up */
  ASSERTW(worker);
  if (mutex_lock(&worker->w_event_mutex) != 0)
	panic("unable to lock event mutex");
  if (cond_signal(&worker->w_event) != 0)
	panic("unable to signal condition variable");
  if (mutex_unlock(&worker->w_event_mutex) != 0)
	panic("unable to unlock event mutex");
}

/*===========================================================================*
 *				worker_suspend				     *
 *===========================================================================*/
struct worker_thread *worker_suspend(void)
{
/* Suspend the current thread, saving certain thread variables. Return a
 * pointer to the thread's worker structure for later resumption.
 */

  assert(self->w_fp == fp);
  assert(fp->fp_worker == self);

  self->w_err_code = err_code;

  return(self);
}

/*===========================================================================*
 *				worker_resume				     *
 *===========================================================================*/
void worker_resume(struct worker_thread *org_self)
{
/* Resume the current thread after suspension, restoring thread variables. */

  ASSERTW(org_self);
  self = org_self;

  fp = self->w_fp;

  err_code = self->w_err_code;
}

/*===========================================================================*
 *				worker_wait				     *
 *===========================================================================*/
void worker_wait(void)
{
/* Put the current thread to sleep until woken up by the main thread. */

  (void) worker_suspend();	/* worker_sleep already saves and restores 'self' */

  worker_sleep();

  /* We continue here after waking up */
  worker_resume(self);
  assert(self->w_next == NULL);
}

/*===========================================================================*
 *				worker_signal				     *
 *===========================================================================*/
void worker_signal(struct worker_thread *worker)
{
  ASSERTW(worker);		/* Make sure we have a valid thread */

  worker_wake(worker);
}
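
/* worker_wait() and worker_signal() form a pair: a worker blocks in
 * worker_wait() until the main thread, typically upon receiving the reply the
 * worker is waiting for, calls worker_signal() on it to let it continue.
 */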

/*===========================================================================*
 *				worker_stop				     *
 *===========================================================================*/
void worker_stop(struct worker_thread *worker)
{
  ASSERTW(worker);		/* Make sure we have a valid thread */

  /* This thread is communicating with a driver or file server */
  if (worker->w_drv_sendrec != NULL) {		/* Driver */
	assert(worker->w_task != NONE);
	worker->w_drv_sendrec->m_type = EIO;
	worker->w_drv_sendrec = NULL;
  } else if (worker->w_sendrec != NULL) {	/* FS */
	/* worker->w_task may be NONE if the FS message was still queued */
	worker->w_sendrec->m_type = EIO;
	worker->w_sendrec = NULL;
  } else {
	panic("reply storage consistency error");	/* Oh dear */
  }

  worker_wake(worker);
}

/*===========================================================================*
 *			worker_stop_by_endpt				     *
 *===========================================================================*/
void worker_stop_by_endpt(endpoint_t proc_e)
{
  struct worker_thread *worker;
  int i;

  if (proc_e == NONE) return;

  for (i = 0; i < NR_WTHREADS; i++) {
	worker = &workers[i];

	if (worker->w_fp != NULL && worker->w_task == proc_e)
		worker_stop(worker);
  }
}
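
/* This is the recovery path for a dying driver or file server: every worker
 * blocked on the given endpoint gets a substitute EIO reply via worker_stop()
 * and is woken up, so it can fail its request gracefully.
 */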

/*===========================================================================*
 *				worker_get				     *
 *===========================================================================*/
struct worker_thread *worker_get(thread_t worker_tid)
{
  int i;

  for (i = 0; i < NR_WTHREADS; i++)
	if (workers[i].w_tid == worker_tid)
		return(&workers[i]);

  return(NULL);
}
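
/* worker_get() maps an mthread thread identifier back to VFS's own worker
 * slot, or returns NULL if the identifier does not belong to a worker thread.
 */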

/*===========================================================================*
 *				worker_set_proc				     *
 *===========================================================================*/
void worker_set_proc(struct fproc *rfp)
{
/* Perform an incredibly ugly action that completely violates the threading
 * model: change the current working thread's process context to another
 * process. The caller is expected to hold the lock to both the calling and the
 * target process, and neither process is expected to continue regular
 * operation when done. This code is here *only* and *strictly* for the reboot
 * code, and *must not* be used for anything else.
 */

  if (fp == rfp) return;

  if (rfp->fp_worker != NULL)
	panic("worker_set_proc: target process not idle");

  fp->fp_worker = NULL;
  self->w_fp = rfp;
  fp = rfp;

  fp->fp_worker = self;
}