/*	$NetBSD: compat_sa.c,v 1.11 2009/09/13 18:45:10 pooka Exp $	*/

/*-
 * Copyright (c) 2001, 2004, 2005, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>

#include "opt_ktrace.h"
#include "opt_multiprocessor.h"

__KERNEL_RCSID(0, "$NetBSD: compat_sa.c,v 1.11 2009/09/13 18:45:10 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#include <sys/ktrace.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/atomic.h>		/* for membar_producer() */

#include <uvm/uvm_extern.h>
/*
 * Now handle building with SA disabled.  We always compile this file;
 * if SA is disabled we merely build stub routines for the call entry
 * points we still need.
 */

/*
 * SA_CONCURRENCY is buggy and can lead to kernel crashes.
 */
#ifdef SA_CONCURRENCY
#ifndef MULTIPROCESSOR
#error "SA_CONCURRENCY is only valid on MULTIPROCESSOR kernels"
#endif
#endif
/*
 * memory pool for sadata structures
 */
static struct pool sadata_pool;

/*
 * memory pool for pending upcalls
 */
static struct pool saupcall_pool;

/*
 * memory pool for sastack structs
 */
static struct pool sastack_pool;

/*
 * memory pool for sadata_vp structures
 */
static struct pool savp_pool;
static struct sadata_vp *sa_newsavp(struct proc *);
static void sa_freevp(struct proc *, struct sadata *, struct sadata_vp *);
static inline int sa_stackused(struct sastack *, struct sadata *);
static inline void sa_setstackfree(struct sastack *, struct sadata *);
static struct sastack *sa_getstack(struct sadata *);
static inline struct sastack *sa_getstack0(struct sadata *);
static inline int sast_compare(struct sastack *, struct sastack *);
#ifdef SA_CONCURRENCY
static int sa_increaseconcurrency(struct lwp *, int);
#endif
static void sa_switchcall(void *);
static void sa_neverrun(void *);
static int sa_newcachelwp(struct lwp *, struct sadata_vp *);
static void sa_makeupcalls(struct lwp *, struct sadata_upcall *);
static inline int sa_pagefault(struct lwp *, ucontext_t *);

static void sa_upcall0(struct sadata_upcall *, int, struct lwp *, struct lwp *,
    size_t, void *, void (*)(void *));
static void sa_upcall_getstate(union sau_state *, struct lwp *, int);

void sa_putcachelwp(struct proc *, struct lwp *);
struct lwp *sa_getcachelwp(struct proc *, struct sadata_vp *);
static void sa_setrunning(struct lwp *);
#ifdef SA_DEBUG
#define DPRINTF(x)	do { if (sadebug) printf_nolog x; } while (0)
#define DPRINTFN(n,x)	do { if (sadebug & (1<<(n-1))) printf_nolog x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
static syncobj_t sa_sobj = {
	/* sleep-queue syncobj operations (initializer elided) */
};

static const char *sa_lwpcache_wmesg = "lwpcache";
static const char *sa_lwpwoken_wmesg = "lwpublk";
#define	SA_LWP_STATE_LOCK(l, f) do {				\
	(f) = ~(l)->l_pflag & LP_SA_NOBLOCK;			\
	(l)->l_pflag |= LP_SA_NOBLOCK;				\
} while (/*CONSTCOND*/ 0)

#define	SA_LWP_STATE_UNLOCK(l, f) do {				\
	(l)->l_pflag ^= (f);					\
} while (/*CONSTCOND*/ 0)
RB_PROTOTYPE(sasttree, sastack, sast_node, sast_compare);
RB_GENERATE(sasttree, sastack, sast_node, sast_compare);

kmutex_t	saupcall_mutex;
SIMPLEQ_HEAD(, sadata_upcall) saupcall_freelist;
	pool_init(&sadata_pool, sizeof(struct sadata), 0, 0, 0, "sadatapl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&saupcall_pool, sizeof(struct sadata_upcall), 0, 0, 0,
	    "saupcpl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&sastack_pool, sizeof(struct sastack), 0, 0, 0, "sastackpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&savp_pool, sizeof(struct sadata_vp), 0, 0, 0, "savppl",
	    &pool_allocator_nointr, IPL_NONE);
/*
 * permit other parts of the kernel to make SA_LWP_STATE_{UN,}LOCK calls.
 */
void
sa_critpath_enter(struct lwp *l1, sa_critpath_t *f1)
{
	SA_LWP_STATE_LOCK(l1, *f1);
}

void
sa_critpath_exit(struct lwp *l1, sa_critpath_t *f1)
{
	SA_LWP_STATE_UNLOCK(l1, *f1);
}
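
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): how a kernel path that may sleep can use the sa_critpath API
 * above so that no BLOCKED upcall is generated for the sleep.  The
 * helper name example_no_upcall_region() is hypothetical.
 */
#if 0
static void
example_no_upcall_region(struct lwp *l)
{
	sa_critpath_t f;

	sa_critpath_enter(l, &f);   /* sets LP_SA_NOBLOCK, saves old state in f */
	/* ... code that may sleep, e.g. pool_get(..., PR_WAITOK) ... */
	sa_critpath_exit(l, &f);    /* restores the previous LP_SA_NOBLOCK state */
}
#endif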
/*
 * sadata_upcall_alloc:
 *
 *	Allocate an sadata_upcall structure.
 */
struct sadata_upcall *
sadata_upcall_alloc(int waitok)
{
	struct sadata_upcall *sau;

	if (waitok && !SIMPLEQ_EMPTY(&saupcall_freelist)) {
		mutex_enter(&saupcall_mutex);
		if ((sau = SIMPLEQ_FIRST(&saupcall_freelist)) != NULL)
			SIMPLEQ_REMOVE_HEAD(&saupcall_freelist, sau_next);
		mutex_exit(&saupcall_mutex);
		if (sau != NULL && sau->sau_arg != NULL)
			(*sau->sau_argfreefunc)(sau->sau_arg);
		if (sau != NULL)
			return sau;
	}

	sau = pool_get(&saupcall_pool, waitok ? PR_WAITOK : PR_NOWAIT);
	return sau;
}
/*
 * sadata_upcall_free:
 *
 *	Free an sadata_upcall structure and any associated argument data.
 */
void
sadata_upcall_free(struct sadata_upcall *sau)
{

	/*
	 * If our current synchronisation object is a sleep queue or
	 * similar, we must not put the object back to the pool as
	 * doing so could acquire sleep locks.  That could trigger
	 * a deadlock.
	 */
	if (curlwp->l_syncobj == &sched_syncobj) {
		if (sau->sau_arg != NULL)
			(*sau->sau_argfreefunc)(sau->sau_arg);
		pool_put(&saupcall_pool, sau);
		sadata_upcall_drain();
		return;
	}

	mutex_enter(&saupcall_mutex);
	SIMPLEQ_INSERT_HEAD(&saupcall_freelist, sau, sau_next);
	mutex_exit(&saupcall_mutex);
}
/*
 * sadata_upcall_drain:
 *
 *	Put freed upcall structures back to the pool.
 */
void
sadata_upcall_drain(void)
{
	struct sadata_upcall *sau;

	sau = SIMPLEQ_FIRST(&saupcall_freelist);
	while (sau != NULL) {
		mutex_enter(&saupcall_mutex);
		if ((sau = SIMPLEQ_FIRST(&saupcall_freelist)) != NULL)
			SIMPLEQ_REMOVE_HEAD(&saupcall_freelist, sau_next);
		mutex_exit(&saupcall_mutex);
		if (sau != NULL)	/* XXX sau_arg free needs a call! */
			pool_put(&saupcall_pool, sau);
	}
}
/*
 * sa_newsavp
 *	Allocate a new virtual processor structure, do some simple
 * initialization and add it to the passed-in sa.  Pre-allocate
 * an upcall event data structure for when the main thread on
 * this vp blocks.
 *
 * We lock ??? while manipulating the list of vp's.
 *
 * We allocate the lwp to run on this separately.  In the case of the
 * first lwp/vp for a process, the lwp already exists.  It's the
 * main (only) lwp of the process.
 */
static struct sadata_vp *
sa_newsavp(struct proc *p)
{
	struct sadata *sa = p->p_sa;
	struct sadata_vp *vp, *qvp;
	struct sadata_upcall *sau;

	/* Allocate virtual processor data structure */
	vp = pool_get(&savp_pool, PR_WAITOK);
	/* And preallocate an upcall data structure for sleeping */
	sau = sadata_upcall_alloc(1);

	memset(vp, 0, sizeof(*vp));
	/*
	 * Lock has to be IPL_SCHED, since we use it in the
	 * hooks from the scheduler code.
	 */
	vp->savp_faultaddr = 0;
	vp->savp_ofaultaddr = 0;
	vp->savp_woken_count = 0;
	vp->savp_lwpcache_count = 0;
	vp->savp_sleeper_upcall = sau;
	mutex_init(&vp->savp_mutex, MUTEX_DEFAULT, IPL_SCHED);
	sleepq_init(&vp->savp_lwpcache);
	sleepq_init(&vp->savp_woken);
	SIMPLEQ_INIT(&vp->savp_upcalls);

	/* We're writing sa_savps, so lock both locks */
	mutex_enter(p->p_lock);
	mutex_enter(&sa->sa_mutex);
	/* find first free savp_id and add vp to sorted slist */
	if (SLIST_EMPTY(&sa->sa_vps) ||
	    SLIST_FIRST(&sa->sa_vps)->savp_id != 0) {
		SLIST_INSERT_HEAD(&sa->sa_vps, vp, savp_next);
	} else {
		SLIST_FOREACH(qvp, &sa->sa_vps, savp_next) {
			if (SLIST_NEXT(qvp, savp_next) == NULL ||
			    SLIST_NEXT(qvp, savp_next)->savp_id !=
			    qvp->savp_id + 1)
				break;
		}
		vp->savp_id = qvp->savp_id + 1;
		SLIST_INSERT_AFTER(qvp, vp, savp_next);
	}
	mutex_exit(&sa->sa_mutex);
	mutex_exit(p->p_lock);

	DPRINTFN(1, ("sa_newsavp(%d) allocated vp %p\n", p->p_pid, vp));

	return (vp);
}
/*
 * sa_freevp
 *	Deallocate a vp.  Must be called with no locks held.
 * Will lock and unlock p_lock.
 */
static void
sa_freevp(struct proc *p, struct sadata *sa, struct sadata_vp *vp)
{
	DPRINTFN(1, ("sa_freevp(%d) freeing vp %p\n", p->p_pid, vp));

	mutex_enter(p->p_lock);

	DPRINTFN(1, ("sa_freevp(%d) about to unlink in vp %p\n", p->p_pid, vp));
	SLIST_REMOVE(&sa->sa_vps, vp, sadata_vp, savp_next);
	DPRINTFN(1, ("sa_freevp(%d) done unlink in vp %p\n", p->p_pid, vp));

	if (vp->savp_sleeper_upcall) {
		sadata_upcall_free(vp->savp_sleeper_upcall);
		vp->savp_sleeper_upcall = NULL;
	}
	DPRINTFN(1, ("sa_freevp(%d) about to mut_det in vp %p\n", p->p_pid, vp));

	mutex_destroy(&vp->savp_mutex);

	mutex_exit(p->p_lock);

	pool_put(&savp_pool, vp);
}
int sa_system_disabled = 1;

/*
 * sys_sa_register
 *	Handle copyin and copyout of info for registering the
 * upcall handler address.
 */
int
sys_sa_register(struct lwp *l, const struct sys_sa_register_args *uap,
    register_t *retval)
{
	sa_upcall_t prev;
	int error;

	error = dosa_register(l, SCARG(uap, new), &prev, SCARG(uap, flags),
	    SCARG(uap, stackinfo_offset));
	if (error)
		return error;

	return copyout(&prev, SCARG(uap, old),
	    sizeof(prev));
}
/*
 * dosa_register
 *	Change the upcall address for the process.  If needed, allocate
 * an sadata structure (and initialize it) for the process.  If initializing,
 * set the flags in the sadata structure to those passed in.  Flags will
 * be ignored if the sadata structure already exists (i.e. dosa_register
 * was called before).
 *
 * Note: changing the upcall handler address for a process that has
 * concurrency greater than one can yield ambiguous results.  The one
 * guarantee we can offer is that any upcalls generated on all CPUs
 * after this routine finishes will use the new upcall handler.  Note
 * that any upcalls delivered upon return to user level by the
 * sys_sa_register() system call that called this routine will use the
 * new upcall handler.  Note that any such upcalls will be delivered
 * before the old upcall handler address has been returned to
 * the application.
 */
417 dosa_register(struct lwp
*l
, sa_upcall_t
new, sa_upcall_t
*prev
, int flags
,
418 ssize_t stackinfo_offset
)
420 struct proc
*p
= l
->l_proc
;
423 if (sa_system_disabled
)
426 if (p
->p_sa
== NULL
) {
427 /* Allocate scheduler activations data structure */
428 sa
= pool_get(&sadata_pool
, PR_WAITOK
);
429 memset(sa
, 0, sizeof(*sa
));
431 /* WRS: not sure if need SCHED. need to audit lockers */
432 mutex_init(&sa
->sa_mutex
, MUTEX_DEFAULT
, IPL_SCHED
);
433 mutex_enter(p
->p_lock
);
434 if ((p
->p_sflag
& PS_NOSA
) != 0) {
435 mutex_exit(p
->p_lock
);
436 mutex_destroy(&sa
->sa_mutex
);
437 pool_put(&sadata_pool
, sa
);
442 sa
->sa_flag
= flags
& SA_FLAG_ALL
;
443 sa
->sa_maxconcurrency
= 1;
444 sa
->sa_concurrency
= 1;
445 RB_INIT(&sa
->sa_stackstree
);
446 sa
->sa_stacknext
= NULL
;
447 if (flags
& SA_FLAG_STACKINFO
)
448 sa
->sa_stackinfo_offset
= stackinfo_offset
;
450 sa
->sa_stackinfo_offset
= 0;
452 sigemptyset(&sa
->sa_sigmask
);
453 sigplusset(&l
->l_sigmask
, &sa
->sa_sigmask
);
454 sigemptyset(&l
->l_sigmask
);
455 SLIST_INIT(&sa
->sa_vps
);
456 cv_init(&sa
->sa_cv
, "sawait");
459 KASSERT(l
->l_savp
== NULL
);
460 mutex_exit(p
->p_lock
);
462 if (l
->l_savp
== NULL
) { /* XXXSMP */
463 l
->l_savp
= sa_newsavp(p
);
464 sa_newcachelwp(l
, NULL
);
467 *prev
= p
->p_sa
->sa_upcall
;
468 p
->p_sa
->sa_upcall
= new;
474 sa_release(struct proc
*p
)
477 struct sastack
*sast
, *next
;
478 struct sadata_vp
*vp
;
483 KASSERT(p
->p_nlwps
<= 1);
485 for (sast
= RB_MIN(sasttree
, &sa
->sa_stackstree
); sast
!= NULL
;
487 next
= RB_NEXT(sasttree
, &sa
->sa_stackstree
, sast
);
488 RB_REMOVE(sasttree
, &sa
->sa_stackstree
, sast
);
489 pool_put(&sastack_pool
, sast
);
492 mutex_enter(p
->p_lock
);
493 p
->p_sflag
= (p
->p_sflag
& ~PS_SA
) | PS_NOSA
;
495 l
= LIST_FIRST(&p
->p_lwps
);
498 KASSERT(LIST_NEXT(l
, l_sibling
) == NULL
);
502 mutex_exit(p
->p_lock
);
504 while ((vp
= SLIST_FIRST(&sa
->sa_vps
)) != NULL
) {
505 sa_freevp(p
, sa
, vp
);
508 DPRINTFN(1, ("sa_release(%d) done vps\n", p
->p_pid
));
510 mutex_destroy(&sa
->sa_mutex
);
511 cv_destroy(&sa
->sa_cv
);
512 pool_put(&sadata_pool
, sa
);
514 DPRINTFN(1, ("sa_release(%d) put sa\n", p
->p_pid
));
516 mutex_enter(p
->p_lock
);
517 p
->p_sflag
&= ~PS_NOSA
;
518 mutex_exit(p
->p_lock
);
524 * copyin the generation number for the stack in question.
526 * WRS: I think this routine needs the SA_LWP_STATE_LOCK() dance, either
527 * here or in its caller.
529 * Must be called with sa_mutex locked.
532 sa_fetchstackgen(struct sastack
*sast
, struct sadata
*sa
, unsigned int *gen
)
536 /* COMPAT_NETBSD32: believe it or not, but the following is ok */
537 mutex_exit(&sa
->sa_mutex
);
538 error
= copyin(&((struct sa_stackinfo_t
*)
539 ((char *)sast
->sast_stack
.ss_sp
+
540 sa
->sa_stackinfo_offset
))->sasi_stackgen
, gen
, sizeof(*gen
));
541 mutex_enter(&sa
->sa_mutex
);
/*
 * sa_stackused
 *	Convenience routine to determine if a given stack has been used
 * or not.  We consider a stack to be unused if the kernel's concept
 * of its generation number matches that of userland.
 *	We kill the application with SIGILL if there is an error copying
 * in the userland generation number.
 */
static inline int
sa_stackused(struct sastack *sast, struct sadata *sa)
{
	unsigned int gen;

	KASSERT(mutex_owned(&sa->sa_mutex));

	if (sa_fetchstackgen(sast, sa, &gen)) {
		sigexit(curlwp, SIGILL);
	}
	return (sast->sast_gen != gen);
}

/*
 * sa_setstackfree
 *	Convenience routine to mark a stack as unused in the kernel's
 * eyes.  We do this by setting the kernel's generation number for the
 * stack to that of userland.
 *	We kill the application with SIGILL if there is an error copying
 * in the userland generation number.
 */
static inline void
sa_setstackfree(struct sastack *sast, struct sadata *sa)
{
	unsigned int gen;

	KASSERT(mutex_owned(&sa->sa_mutex));

	if (sa_fetchstackgen(sast, sa, &gen)) {
		sigexit(curlwp, SIGILL);
	}
	sast->sast_gen = gen;
}
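
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): the userland half of the stack-generation protocol used by
 * sa_stackused()/sa_setstackfree() above.  The thread library keeps an
 * sa_stackinfo_t at sa_stackinfo_offset bytes into each upcall stack;
 * when it has finished with the stack it advances sasi_stackgen so the
 * kernel's copy and the userland copy match again and the stack reads
 * as free.  The helper name is hypothetical, and the exact convention
 * for which side advances its counter first is an assumption.
 */
#if 0
static void
example_release_upcall_stack(stack_t *ss, ssize_t stackinfo_offset)
{
	struct sa_stackinfo_t *si;

	si = (struct sa_stackinfo_t *)((char *)ss->ss_sp + stackinfo_offset);
	si->sasi_stackgen++;	/* tell the kernel this stack may be reused */
}
#endif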
595 * Find next free stack, starting at sa->sa_stacknext. Must be called
596 * with sa->sa_mutex held, and will release while checking for stack
599 * Caller should have set LP_SA_NOBLOCK for our thread. This is not the time
600 * to go generating upcalls as we aren't in a position to deliver another one.
602 static struct sastack
*
603 sa_getstack(struct sadata
*sa
)
605 struct sastack
*sast
;
608 KASSERT(mutex_owned(&sa
->sa_mutex
));
611 chg
= sa
->sa_stackchg
;
612 sast
= sa
->sa_stacknext
;
613 if (sast
== NULL
|| sa_stackused(sast
, sa
))
614 sast
= sa_getstack0(sa
);
615 } while (chg
!= sa
->sa_stackchg
);
/*
 * sa_getstack0 -- get the lowest-numbered sa stack
 *
 *	We walk the stack tree in order and find the lowest-numbered
 * (as defined by RB_MIN() and RB_NEXT() ordering) stack that is
 * currently unused.
 */
633 static inline struct sastack
*
634 sa_getstack0(struct sadata
*sa
)
636 struct sastack
*start
;
639 KASSERT(mutex_owned(&sa
->sa_mutex
));
642 chg
= sa
->sa_stackchg
;
643 if (sa
->sa_stacknext
== NULL
) {
644 sa
->sa_stacknext
= RB_MIN(sasttree
, &sa
->sa_stackstree
);
645 if (sa
->sa_stacknext
== NULL
)
648 start
= sa
->sa_stacknext
;
650 while (sa_stackused(sa
->sa_stacknext
, sa
)) {
651 if (sa
->sa_stackchg
!= chg
)
653 sa
->sa_stacknext
= RB_NEXT(sasttree
, &sa
->sa_stackstree
,
655 if (sa
->sa_stacknext
== NULL
)
656 sa
->sa_stacknext
= RB_MIN(sasttree
,
658 if (sa
->sa_stacknext
== start
)
661 return sa
->sa_stacknext
;
/*
 * sast_compare - compare two sastacks
 *
 *	We sort stacks according to their userspace addresses.
 * Stacks are considered "equal" if their address ranges
 * (ss_sp .. ss_sp + ss_size) overlap.
 */
static inline int
sast_compare(struct sastack *a, struct sastack *b)
{

	if ((vaddr_t)a->sast_stack.ss_sp + a->sast_stack.ss_size <=
	    (vaddr_t)b->sast_stack.ss_sp)
		return (-1);
	if ((vaddr_t)a->sast_stack.ss_sp >=
	    (vaddr_t)b->sast_stack.ss_sp + b->sast_stack.ss_size)
		return (1);
	return (0);
}
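
/*
 * Because sast_compare() treats overlapping address ranges as equal,
 * RB_FIND() can locate the registered stack containing an arbitrary
 * user address by probing with a one-byte dummy key; sa_pagefault()
 * below uses exactly this trick.  Illustrative sketch only (kept out
 * of the build); the helper name is hypothetical and sa_mutex must be
 * held as for any other access to the stack tree.
 */
#if 0
static struct sastack *
example_stack_containing(struct sadata *sa, void *sp)
{
	struct sastack key;

	key.sast_stack.ss_sp = sp;
	key.sast_stack.ss_size = 1;
	return RB_FIND(sasttree, &sa->sa_stackstree, &key);
}
#endif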
/*
 * sa_copyin_stack -- copyin a stack.
 */
static int
sa_copyin_stack(stack_t *stacks, int index, stack_t *dest)
{
	return copyin(stacks + index, dest, sizeof(stack_t));
}
/*
 * sys_sa_stacks -- the user level threading library is passing us stacks
 *
 *	We copy in some arguments then call sa_stacks1() to do the main
 * work.  NETBSD32 has its own front-end for this call.
 */
int
sys_sa_stacks(struct lwp *l, const struct sys_sa_stacks_args *uap,
    register_t *retval)
{
	return sa_stacks1(l, retval, SCARG(uap, num), SCARG(uap, stacks),
	    sa_copyin_stack);
}
/*
 * sa_stacks1
 *	Process stacks passed in by the user threading library.  At
 * present we use the kernel lock to lock the stack tree, which we
 * manipulate to load in the stacks.
 *
 *	It is an error to pass in a stack that we already know about
 * and which hasn't been used.  Passing in a known-but-used one is fine.
 * We accept up to SA_MAXNUMSTACKS per desired vp (concurrency level).
 */
717 sa_stacks1(struct lwp
*l
, register_t
*retval
, int num
, stack_t
*stacks
,
718 sa_copyin_stack_t do_sa_copyin_stack
)
720 struct sadata
*sa
= l
->l_proc
->p_sa
;
721 struct sastack
*sast
, *new;
722 int count
, error
, f
, i
, chg
;
724 /* We have to be using scheduler activations */
732 SA_LWP_STATE_LOCK(l
, f
);
736 for (i
= 0; i
< count
; i
++) {
737 new = pool_get(&sastack_pool
, PR_WAITOK
);
738 error
= do_sa_copyin_stack(stacks
, i
, &new->sast_stack
);
743 mutex_enter(&sa
->sa_mutex
);
745 chg
= sa
->sa_stackchg
;
746 sa_setstackfree(new, sa
);
747 sast
= RB_FIND(sasttree
, &sa
->sa_stackstree
, new);
749 DPRINTFN(9, ("sa_stacks(%d.%d) returning stack %p\n",
750 l
->l_proc
->p_pid
, l
->l_lid
,
751 new->sast_stack
.ss_sp
));
752 if (sa_stackused(sast
, sa
) == 0) {
755 mutex_exit(&sa
->sa_mutex
);
756 pool_put(&sastack_pool
, new);
759 if (chg
!= sa
->sa_stackchg
)
761 } else if (sa
->sa_nstacks
>=
762 SA_MAXNUMSTACKS
* sa
->sa_concurrency
) {
764 ("sa_stacks(%d.%d) already using %d stacks\n",
765 l
->l_proc
->p_pid
, l
->l_lid
,
766 SA_MAXNUMSTACKS
* sa
->sa_concurrency
));
769 mutex_exit(&sa
->sa_mutex
);
770 pool_put(&sastack_pool
, new);
773 DPRINTFN(9, ("sa_stacks(%d.%d) adding stack %p\n",
774 l
->l_proc
->p_pid
, l
->l_lid
,
775 new->sast_stack
.ss_sp
));
776 RB_INSERT(sasttree
, &sa
->sa_stackstree
, new);
780 mutex_exit(&sa
->sa_mutex
);
783 SA_LWP_STATE_UNLOCK(l
, f
);
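
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): how a user-level thread library might hand a batch of upcall
 * stacks to the kernel through the sa_stacks() stub that reaches
 * sys_sa_stacks()/sa_stacks1() above.  The helper name and error
 * handling are assumptions, not the actual libpthread code.
 */
#if 0
int
example_register_upcall_stacks(stack_t *stacks, int nstacks)
{

	/* Each stack_t describes one upcall stack (ss_sp/ss_size). */
	return sa_stacks(nstacks, stacks);
}
#endif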
/*
 * sys_sa_enable - throw the switch & enable SA
 *
 *	Fairly simple.  Make sure the sadata and vp have been set up for this
 * process, assign this thread to the vp, and initiate the first upcall
 * (SA_UPCALL_NEWPROC).
 */
798 sys_sa_enable(struct lwp
*l
, const void *v
, register_t
*retval
)
800 struct proc
*p
= l
->l_proc
;
801 struct sadata
*sa
= p
->p_sa
;
802 struct sadata_vp
*vp
= l
->l_savp
;
805 DPRINTF(("sys_sa_enable(%d.%d)\n", l
->l_proc
->p_pid
,
808 /* We have to be using scheduler activations */
809 if (sa
== NULL
|| vp
== NULL
)
812 if (p
->p_sflag
& PS_SA
) /* Already running! */
815 error
= sa_upcall(l
, SA_UPCALL_NEWPROC
, l
, NULL
, 0, NULL
, NULL
);
819 /* Assign this LWP to the virtual processor */
820 mutex_enter(p
->p_lock
);
824 l
->l_flag
|= LW_SA
; /* We are now an activation LWP */
826 mutex_exit(p
->p_lock
);
829 * This will return to the SA handler previously registered.
/*
 * sa_increaseconcurrency
 *	Raise the process's maximum concurrency level to the
 * requested level.  Does nothing if the current maximum concurrency
 * is greater than the requested level.
 *	Must be called with sa_mutex locked.  Will unlock and relock as
 * needed, and will lock p_lock.  Will exit with sa_mutex locked.
 */
843 #ifdef SA_CONCURRENCY
846 sa_increaseconcurrency(struct lwp
*l
, int concurrency
)
851 struct sadata_vp
*vp
;
852 struct sadata_upcall
*sau
;
853 int addedconcurrency
, error
;
858 KASSERT(mutex_owned(&sa
->sa_mutex
));
860 addedconcurrency
= 0;
861 while (sa
->sa_maxconcurrency
< concurrency
) {
862 sa
->sa_maxconcurrency
++;
863 sa
->sa_concurrency
++;
864 mutex_exit(&sa
->sa_mutex
);
867 error
= sa_newcachelwp(l
, vp
);
869 /* reset concurrency */
870 mutex_enter(&sa
->sa_mutex
);
871 sa
->sa_maxconcurrency
--;
872 sa
->sa_concurrency
--;
873 return (addedconcurrency
);
875 mutex_enter(&vp
->savp_mutex
);
876 l2
= sa_getcachelwp(p
, vp
);
879 sau
= vp
->savp_sleeper_upcall
;
880 vp
->savp_sleeper_upcall
= NULL
;
881 KASSERT(sau
!= NULL
);
883 cpu_setfunc(l2
, sa_switchcall
, sau
);
884 sa_upcall0(sau
, SA_UPCALL_NEWPROC
, NULL
, NULL
,
888 /* put l2 into l's VP LWP cache */
889 mutex_exit(&vp
->savp_mutex
);
891 l2
->l_savp
= l
->l_savp
;
892 cpu_setfunc(l2
, sa_neverrun
, NULL
);
894 mutex_enter(&l
->l_savp
->savp_mutex
);
895 sa_putcachelwp(p
, l2
);
896 mutex_exit(&l
->l_savp
->savp_mutex
);
899 sa_freevp(p
, sa
, vp
);
901 /* reset concurrency */
902 mutex_enter(&sa
->sa_mutex
);
903 sa
->sa_maxconcurrency
--;
904 sa
->sa_concurrency
--;
905 return (addedconcurrency
);
907 /* Run the LWP, locked since its mutex is still savp_mutex */
909 mutex_exit(&vp
->savp_mutex
);
911 mutex_enter(&sa
->sa_mutex
);
915 return (addedconcurrency
);
/*
 * sys_sa_setconcurrency
 *	The user threading library wants to increase the number
 * of active virtual CPUs we assign to it.  We return the number of virtual
 * CPUs we assigned to the process.  We limit concurrency to the number
 * of CPUs in the system.
 *
 * WRS: at present, this system call serves two purposes.  The first is
 * for an application to indicate that it wants a certain concurrency
 * level.  The second is for the application to request that the kernel
 * reactivate previously allocated virtual CPUs.
 */
932 sys_sa_setconcurrency(struct lwp
*l
, const struct sys_sa_setconcurrency_args
*uap
,
935 struct proc
*p
= l
->l_proc
;
936 struct sadata
*sa
= p
->p_sa
;
937 #ifdef SA_CONCURRENCY
938 struct sadata_vp
*vp
= l
->l_savp
;
942 CPU_INFO_ITERATOR cii
;
945 DPRINTFN(11,("sys_sa_concurrency(%d.%d)\n", p
->p_pid
,
948 /* We have to be using scheduler activations */
952 if ((p
->p_sflag
& PS_SA
) == 0)
955 if (SCARG(uap
, concurrency
) < 1)
960 * Concurrency greater than the number of physical CPUs does
962 * XXX Should we ever support hot-plug CPUs, this will need
965 #ifdef SA_CONCURRENCY
966 mutex_enter(&sa
->sa_mutex
);
968 if (SCARG(uap
, concurrency
) > sa
->sa_maxconcurrency
) {
970 for (CPU_INFO_FOREACH(cii
, ci
))
972 *retval
+= sa_increaseconcurrency(l
,
973 min(SCARG(uap
, concurrency
), ncpus
));
977 DPRINTFN(11,("sys_sa_concurrency(%d.%d) want %d, have %d, max %d\n",
978 p
->p_pid
, l
->l_lid
, SCARG(uap
, concurrency
),
979 sa
->sa_concurrency
, sa
->sa_maxconcurrency
));
980 #ifdef SA_CONCURRENCY
981 if (SCARG(uap
, concurrency
) <= sa
->sa_concurrency
) {
982 mutex_exit(&sa
->sa_mutex
);
985 SLIST_FOREACH(vp
, &sa
->sa_vps
, savp_next
) {
988 if (l2
->l_flag
& LW_SA_IDLE
) {
989 l2
->l_flag
&= ~(LW_SA_IDLE
|LW_SA_YIELD
|LW_SINTR
);
991 DPRINTFN(11,("sys_sa_concurrency(%d.%d) NEWPROC vp %d\n",
992 p
->p_pid
, l
->l_lid
, vp
->savp_id
));
993 sa
->sa_concurrency
++;
994 mutex_exit(&sa
->sa_mutex
);
995 /* error = */ sa_upcall(l2
, SA_UPCALL_NEWPROC
, NULL
,
996 NULL
, 0, NULL
, NULL
);
998 /* lwp_unsleep() will unlock the LWP */
999 lwp_unsleep(vp
->savp_lwp
, true);
1000 KASSERT((l2
->l_flag
& LW_SINTR
) == 0);
1002 mutex_enter(&sa
->sa_mutex
);
1005 if (sa
->sa_concurrency
== SCARG(uap
, concurrency
))
1008 mutex_exit(&sa
->sa_mutex
);
/*
 * sys_sa_yield
 *	The application has nothing for this lwp to do, so let it linger in
 * the kernel via sa_yield().
 */
1019 sys_sa_yield(struct lwp
*l
, const void *v
, register_t
*retval
)
1021 struct proc
*p
= l
->l_proc
;
1023 mutex_enter(p
->p_lock
);
1024 if (p
->p_sa
== NULL
|| !(p
->p_sflag
& PS_SA
)) {
1025 mutex_exit(p
->p_lock
);
1027 ("sys_sa_yield(%d.%d) proc %p not SA (p_sa %p, flag %s)\n",
1028 p
->p_pid
, l
->l_lid
, p
, p
->p_sa
,
1029 p
->p_sflag
& PS_SA
? "T" : "F"));
1033 mutex_exit(p
->p_lock
);
1037 return (EJUSTRETURN
);
/*
 * sa_yield
 *	This lwp has nothing to do, so hang around.  Assuming we
 * are the lwp "on" our vp, sleep in "sawait" until there's something
 * to do.
 *
 *	Unfortunately some subsystems can't directly tell us if there's an
 * upcall going to happen when we get woken up.  Work gets deferred to
 * userret() and that work may trigger an upcall.  So we have to try
 * calling userret() (by calling upcallret()) and see if makeupcalls()
 * delivered an upcall.  It will clear LW_SA_YIELD if it did.
 */
1053 sa_yield(struct lwp
*l
)
1055 struct proc
*p
= l
->l_proc
;
1056 struct sadata
*sa
= p
->p_sa
;
1057 struct sadata_vp
*vp
= l
->l_savp
;
1062 if (vp
->savp_lwp
!= l
) {
1066 * We lost the VP on our way here, this happens for
1067 * instance when we sleep in systrace. This will end
1068 * in an SA_UNBLOCKED_UPCALL in sa_unblock_userret().
1070 DPRINTFN(2,("sa_yield(%d.%d) lost VP\n",
1071 p
->p_pid
, l
->l_lid
));
1072 KASSERT(l
->l_flag
& LW_SA_BLOCKING
);
1077 * If we're the last running LWP, stick around to receive
1080 KASSERT((l
->l_flag
& LW_SA_YIELD
) == 0);
1081 DPRINTFN(2,("sa_yield(%d.%d) going dormant\n",
1082 p
->p_pid
, l
->l_lid
));
1084 * A signal will probably wake us up. Worst case, the upcall
1085 * happens and just causes the process to yield again.
1087 KASSERT(vp
->savp_lwp
== l
);
1090 * If we were told to make an upcall or exit already
1091 * make sure we process it (by returning and letting userret() do
1092 * the right thing). Otherwise set LW_SA_YIELD and go to sleep.
1095 if (l
->l_flag
& LW_SA_UPCALL
) {
1099 l
->l_flag
|= LW_SA_YIELD
;
1103 DPRINTFN(2,("sa_yield(%d.%d) really going dormant\n",
1104 p
->p_pid
, l
->l_lid
));
1106 mutex_enter(&sa
->sa_mutex
);
1107 sa
->sa_concurrency
--;
1108 ret
= cv_wait_sig(&sa
->sa_cv
, &sa
->sa_mutex
);
1109 sa
->sa_concurrency
++;
1110 mutex_exit(&sa
->sa_mutex
);
1111 DPRINTFN(2,("sa_yield(%d.%d) woke\n",
1112 p
->p_pid
, l
->l_lid
));
1114 KASSERT(vp
->savp_lwp
== l
|| p
->p_sflag
& PS_WEXIT
);
1117 * We get woken in two different ways. Most code
1118 * calls setrunnable() which clears LW_SA_IDLE,
1119 * but leaves LW_SA_YIELD. Some call points
1120 * (in this file) however also clear LW_SA_YIELD, mainly
1121 * as the code knows there is an upcall to be delivered.
1123 * As noted above, except in the cases where other code
1124 * in this file cleared LW_SA_YIELD already, we have to
1125 * try calling upcallret() & seeing if upcalls happen.
1126 * if so, tell userret() NOT to deliver more upcalls on
1129 if (l
->l_flag
& LW_SA_YIELD
) {
1131 if (~l
->l_flag
& LW_SA_YIELD
) {
1133 * Ok, we made an upcall. We will exit. Tell
1134 * sa_upcall_userret() to NOT make any more
1137 vp
->savp_pflags
|= SAVP_FLAG_NOUPCALLS
;
1139 * Now force us to call into sa_upcall_userret()
1140 * which will clear SAVP_FLAG_NOUPCALLS
1143 l
->l_flag
|= LW_SA_UPCALL
;
1149 } while (l
->l_flag
& LW_SA_YIELD
);
1151 DPRINTFN(2,("sa_yield(%d.%d) returned, ret %d\n",
1152 p
->p_pid
, l
->l_lid
, ret
));
1159 * sys_sa_preempt - preempt a running thread
1161 * Given an lwp id, send it a user upcall. This is a way for libpthread to
1162 * kick something into the upcall handler.
1165 sys_sa_preempt(struct lwp
*l
, const struct sys_sa_preempt_args
*uap
,
1170 struct proc
*p
= l
->l_proc
;
1171 struct sadata
*sa
= p
->p_sa
;
1175 DPRINTFN(11,("sys_sa_preempt(%d.%d)\n", l
->l_proc
->p_pid
,
1178 /* We have to be using scheduler activations */
1182 if ((p
->p_sflag
& PS_SA
) == 0)
1185 if ((target
= SCARG(uap
, sa_id
)) < 1)
1188 mutex_enter(p
->p_lock
);
1190 LIST_FOREACH(t
, &l
->l_proc
->p_lwps
, l_sibling
)
1191 if (t
->l_lid
== target
)
1199 /* XXX WRS We really need all of this locking documented */
1200 mutex_exit(p
->p_lock
);
1202 error
= sa_upcall(l
, SA_UPCALL_USER
| SA_UPCALL_DEFER_EVENT
, l
, NULL
,
1210 mutex_exit(p
->p_lock
);
1214 /* Just return an error */
1215 return (sys_nosys(l
, (const void *)uap
, retval
));
/* XXX Hm, naming collision. */
/*
 * sa_preempt().  In the 4.0 code, this routine is called when we
 * are in preempt() and the caller informed us it does NOT
 * have more work to do (it's going to userland after we return).
 * If mi_switch() tells us we switched to another thread, we
 * generate a BLOCKED upcall.  Since we are returning to userland
 * we then will immediately generate an UNBLOCKED upcall as well.
 * The only place that actually didn't tell preempt() that
 * we had more to do was sys_sched_yield() (well, midi did too).
 *
 * For simplicity, in 5.0+ code, just call this routine in
 * sys_sched_yield after we preempt().  The BLOCKED/UNBLOCKED
 * upcall sequence will get delivered when we return to userland
 * and will ensure that the SA scheduler has an opportunity to
 * effectively preempt the thread that was running in userland.
 *
 * Of course, it would be simpler for libpthread to just intercept
 * this call, but we do this to ensure binary compatibility.  Plus
 * it's not hard to do.
 *
 * We are called and return with no locks held.
 */
1245 sa_preempt(struct lwp
*l
)
1247 struct proc
*p
= l
->l_proc
;
1248 struct sadata
*sa
= p
->p_sa
;
1251 * Defer saving the lwp's state because on some ports
1252 * preemption can occur between generating an unblocked upcall
1253 * and processing the upcall queue.
1255 if (sa
->sa_flag
& SA_FLAG_PREEMPT
)
1256 sa_upcall(l
, SA_UPCALL_PREEMPTED
| SA_UPCALL_DEFER_EVENT
,
1257 l
, NULL
, 0, NULL
, NULL
);
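
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build) of the call sequence described in the comment above: a yield
 * path preempts and then lets sa_preempt() queue the PREEMPTED upcall,
 * which is delivered on the way back to userland.  This is a simplified
 * stand-in, not the actual sys_sched_yield() code.
 */
#if 0
int
example_yield(struct lwp *l, const void *v, register_t *retval)
{

	preempt();			/* may switch to another LWP */
	if (l->l_flag & LW_SA)		/* only SA processes get the upcall */
		sa_preempt(l);
	return 0;
}
#endif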
1262 * Set up the user-level stack and trapframe to do an upcall.
1264 * NOTE: This routine WILL FREE "arg" in the case of failure! Callers
1265 * should not touch the "arg" pointer once calling sa_upcall().
1268 sa_upcall(struct lwp
*l
, int type
, struct lwp
*event
, struct lwp
*interrupted
,
1269 size_t argsize
, void *arg
, void (*func
)(void *))
1271 struct sadata_upcall
*sau
;
1272 struct sadata
*sa
= l
->l_proc
->p_sa
;
1273 struct sadata_vp
*vp
= l
->l_savp
;
1274 struct sastack
*sast
;
1277 KASSERT((type
& (SA_UPCALL_LOCKED_EVENT
| SA_UPCALL_LOCKED_INTERRUPTED
))
1280 /* XXX prevent recursive upcalls if we sleep for memory */
1281 SA_LWP_STATE_LOCK(curlwp
, f
);
1282 sau
= sadata_upcall_alloc(1);
1283 mutex_enter(&sa
->sa_mutex
);
1284 sast
= sa_getstack(sa
);
1285 mutex_exit(&sa
->sa_mutex
);
1286 SA_LWP_STATE_UNLOCK(curlwp
, f
);
1288 if (sau
== NULL
|| sast
== NULL
) {
1290 mutex_enter(&sa
->sa_mutex
);
1291 sa_setstackfree(sast
, sa
);
1292 mutex_exit(&sa
->sa_mutex
);
1295 sadata_upcall_free(sau
);
1298 DPRINTFN(9,("sa_upcall(%d.%d) using stack %p\n",
1299 l
->l_proc
->p_pid
, l
->l_lid
, sast
->sast_stack
.ss_sp
));
1301 if (l
->l_proc
->p_emul
->e_sa
->sae_upcallconv
) {
1302 error
= (*l
->l_proc
->p_emul
->e_sa
->sae_upcallconv
)(l
, type
,
1303 &argsize
, &arg
, &func
);
1305 mutex_enter(&sa
->sa_mutex
);
1306 sa_setstackfree(sast
, sa
);
1307 mutex_exit(&sa
->sa_mutex
);
1308 sadata_upcall_free(sau
);
1313 sa_upcall0(sau
, type
, event
, interrupted
, argsize
, arg
, func
);
1314 sau
->sau_stack
= sast
->sast_stack
;
1315 mutex_enter(&vp
->savp_mutex
);
1316 SIMPLEQ_INSERT_TAIL(&vp
->savp_upcalls
, sau
, sau_next
);
1318 l
->l_flag
|= LW_SA_UPCALL
;
1320 mutex_exit(&vp
->savp_mutex
);
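
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): because sa_upcall() above frees "arg" through the supplied
 * function on failure, a caller must not touch or free the argument
 * itself once the call has been made.  The helper name is hypothetical.
 */
#if 0
static int
example_queue_user_upcall(struct lwp *l, void *arg, size_t argsize,
    void (*freefn)(void *))
{
	int error;

	error = sa_upcall(l, SA_UPCALL_USER, l, NULL, argsize, arg, freefn);
	if (error != 0) {
		/* "arg" has already been released by sa_upcall(). */
		return error;
	}
	return 0;
}
#endif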
1326 sa_upcall0(struct sadata_upcall
*sau
, int type
, struct lwp
*event
,
1327 struct lwp
*interrupted
, size_t argsize
, void *arg
, void (*func
)(void *))
1329 DPRINTFN(12,("sa_upcall0: event %p interrupted %p type %x\n",
1330 event
, interrupted
, type
));
1332 KASSERT((event
== NULL
) || (event
!= interrupted
));
1336 if (type
& SA_UPCALL_DEFER_EVENT
) {
1337 sau
->sau_event
.ss_deferred
.ss_lwp
= event
;
1338 sau
->sau_flags
|= SAU_FLAG_DEFERRED_EVENT
;
1340 sa_upcall_getstate(&sau
->sau_event
, event
,
1341 type
& SA_UPCALL_LOCKED_EVENT
);
1342 if (type
& SA_UPCALL_DEFER_INTERRUPTED
) {
1343 sau
->sau_interrupted
.ss_deferred
.ss_lwp
= interrupted
;
1344 sau
->sau_flags
|= SAU_FLAG_DEFERRED_INTERRUPTED
;
1346 sa_upcall_getstate(&sau
->sau_interrupted
, interrupted
,
1347 type
& SA_UPCALL_LOCKED_INTERRUPTED
);
1349 sau
->sau_type
= type
& SA_UPCALL_TYPE_MASK
;
1350 sau
->sau_argsize
= argsize
;
1352 sau
->sau_argfreefunc
= func
;
1357 * return the stack pointer (??) for a given context as
1358 * reported by the _UC_MACHINE_SP() macro.
1363 ucontext_t
*uc
= arg
;
1365 return (void *)(uintptr_t)_UC_MACHINE_SP(uc
);
1369 * sa_upcall_getstate
1370 * Fill in the given sau_state with info for the passed-in
1371 * lwp, and update the lwp accordingly.
1372 * We set LW_SA_SWITCHING on the target lwp, and so we have to hold
1373 * l's lock in this call. l must be already locked, or it must be unlocked
1374 * and locking it must not cause deadlock.
1377 sa_upcall_getstate(union sau_state
*ss
, struct lwp
*l
, int isLocked
)
1385 l
->l_flag
|= LW_SA_SWITCHING
;
1388 (*l
->l_proc
->p_emul
->e_sa
->sae_getucontext
)(l
,
1389 (void *)&ss
->ss_captured
.ss_ctx
);
1392 l
->l_flag
&= ~LW_SA_SWITCHING
;
1395 sp
= (*l
->l_proc
->p_emul
->e_sa
->sae_ucsp
)
1396 (&ss
->ss_captured
.ss_ctx
);
1397 /* XXX COMPAT_NETBSD32: _UC_UCONTEXT_ALIGN */
1398 sp
= STACK_ALIGN(sp
, ~_UC_UCONTEXT_ALIGN
);
1399 ucsize
= roundup(l
->l_proc
->p_emul
->e_sa
->sae_ucsize
,
1400 (~_UC_UCONTEXT_ALIGN
) + 1);
1401 ss
->ss_captured
.ss_sa
.sa_context
=
1402 (ucontext_t
*)STACK_ALLOC(sp
, ucsize
);
1403 ss
->ss_captured
.ss_sa
.sa_id
= l
->l_lid
;
1404 ss
->ss_captured
.ss_sa
.sa_cpu
= l
->l_savp
->savp_id
;
1406 ss
->ss_captured
.ss_sa
.sa_context
= NULL
;
1413 * Detect double pagefaults and pagefaults on upcalls.
1414 * - double pagefaults are detected by comparing the previous faultaddr
1415 * against the current faultaddr
1416 * - pagefaults on upcalls are detected by checking if the userspace
1417 * thread is running on an upcall stack
1420 sa_pagefault(struct lwp
*l
, ucontext_t
*l_ctx
)
1424 struct sadata_vp
*vp
;
1425 struct sastack sast
;
1432 KASSERT(mutex_owned(&sa
->sa_mutex
));
1433 KASSERT(vp
->savp_lwp
== l
);
1435 if (vp
->savp_faultaddr
== vp
->savp_ofaultaddr
) {
1436 DPRINTFN(10,("sa_pagefault(%d.%d) double page fault\n",
1437 p
->p_pid
, l
->l_lid
));
1441 sast
.sast_stack
.ss_sp
= (*p
->p_emul
->e_sa
->sae_ucsp
)(l_ctx
);
1442 sast
.sast_stack
.ss_size
= 1;
1443 found
= (RB_FIND(sasttree
, &sa
->sa_stackstree
, &sast
) != NULL
);
1446 DPRINTFN(10,("sa_pagefault(%d.%d) upcall page fault\n",
1447 p
->p_pid
, l
->l_lid
));
1451 vp
->savp_ofaultaddr
= vp
->savp_faultaddr
;
1459 * Called by sleepq_block() when it wants to call mi_switch().
1460 * Block current LWP and switch to another.
1462 * WE ARE NOT ALLOWED TO SLEEP HERE! WE ARE CALLED FROM WITHIN
1463 * SLEEPQ_BLOCK() ITSELF! We are called with sched_lock held, and must
1464 * hold it right through the mi_switch() call.
1466 * We return with the scheduler unlocked.
1468 * We are called in one of three conditions:
1470 * 1: We are an sa_yield thread. If there are any UNBLOCKED
1471 * upcalls to deliver, deliver them (by exiting) instead of sleeping.
1472 * 2: We are the main lwp (we're the lwp on our vp). Trigger
1473 * delivery of a BLOCKED upcall.
1474 * 3: We are not the main lwp on our vp. Chances are we got
1475 * woken up but the sleeper turned around and went back to sleep.
1476 * It seems that select and poll do this a lot. So just go back to sleep.
1480 sa_switch(struct lwp
*l
)
1482 struct proc
*p
= l
->l_proc
;
1483 struct sadata_vp
*vp
= l
->l_savp
;
1484 struct sadata_upcall
*sau
= NULL
;
1487 KASSERT(lwp_locked(l
, NULL
));
1489 DPRINTFN(4,("sa_switch(%d.%d VP %d)\n", p
->p_pid
, l
->l_lid
,
1490 vp
->savp_lwp
? vp
->savp_lwp
->l_lid
: 0));
1492 if ((l
->l_flag
& LW_WEXIT
) || (p
->p_sflag
& (PS_WCORE
| PS_WEXIT
))) {
	/*
	 * We need to hold two locks from here on out.  Since you can
	 * sleepq_block() on ANY lock, there really can't be a locking
	 * hierarchy relative to savp_mutex.  So if we can't get the mutex,
	 * drop the lwp lock, get the mutex, and carry on.
	 *
	 * Assumes the lwp lock can never be a sleeping mutex.
	 *
	 * We do, however, try hard to avoid contending on savp_mutex.  The
	 * only times we lock it are either when we are the blessed lwp for
	 * our vp, or when a blocked lwp is adding itself to the savp_woken
	 * list.  So contention should be rare.
	 */
1510 if (!mutex_tryenter(&vp
->savp_mutex
)) {
1512 mutex_enter(&vp
->savp_mutex
);
1515 if (l
->l_stat
== LSONPROC
) {
1516 /* Oops! We woke before we got to sleep. Ok, back we go! */
1518 mutex_exit(&vp
->savp_mutex
);
1522 if (l
->l_flag
& LW_SA_YIELD
) {
1524 * Case 0: we're blocking in sa_yield
1526 DPRINTFN(4,("sa_switch(%d.%d) yield, flags %x pflag %x\n",
1527 p
->p_pid
, l
->l_lid
, l
->l_flag
, l
->l_pflag
));
1528 if (vp
->savp_woken_count
== 0 && p
->p_timerpend
== 0) {
1529 DPRINTFN(4,("sa_switch(%d.%d) setting idle\n",
1530 p
->p_pid
, l
->l_lid
));
1531 l
->l_flag
|= LW_SA_IDLE
;
1532 mutex_exit(&vp
->savp_mutex
);
1536 * Make us running again. lwp_unsleep() will
1539 mutex_exit(&vp
->savp_mutex
);
1540 lwp_unsleep(l
, true);
1545 if (vp
->savp_lwp
== l
) {
1546 if (vp
->savp_pflags
& SAVP_FLAG_DELIVERING
) {
1548 * We've exited sa_switchcall() but NOT
1549 * made it into a new systemcall. Don't make
1552 mutex_exit(&vp
->savp_mutex
);
1557 * Case 1: we're blocking for the first time; generate
1558 * a SA_BLOCKED upcall and allocate resources for the
1561 if (vp
->savp_sleeper_upcall
) {
1562 sau
= vp
->savp_sleeper_upcall
;
1563 vp
->savp_sleeper_upcall
= NULL
;
1568 printf("sa_switch(%d.%d): no upcall data.\n",
1569 p
->p_pid
, l
->l_lid
);
1571 panic("Oops! Don't have a sleeper!\n");
1572 /* XXXWRS Shouldn't we just kill the app here? */
1573 mutex_exit(&vp
->savp_mutex
);
1579 * The process of allocating a new LWP could cause
1580 * sleeps. We're called from inside sleep, so that
1581 * would be Bad. Therefore, we must use a cached new
1582 * LWP. The first thing that this new LWP must do is
1583 * allocate another LWP for the cache.
1585 l2
= sa_getcachelwp(p
, vp
);
1588 /* No upcall for you! */
1589 /* XXX The consequences of this are more subtle and
1590 * XXX the recovery from this situation deserves
1594 /* XXXUPSXXX Should only happen with concurrency > 1 */
1595 mutex_exit(&vp
->savp_mutex
);
1597 sadata_upcall_free(sau
);
1601 cpu_setfunc(l2
, sa_switchcall
, sau
);
1602 sa_upcall0(sau
, SA_UPCALL_BLOCKED
| SA_UPCALL_LOCKED_EVENT
, l
,
1603 NULL
, 0, NULL
, NULL
);
1606 * Perform the double/upcall pagefault check.
1607 * We do this only here since we need l's ucontext to
1608 * get l's userspace stack. sa_upcall0 above has saved
1610 * The LP_SA_PAGEFAULT flag is set in the MD
1611 * pagefault code to indicate a pagefault. The MD
1612 * pagefault code also saves the faultaddr for us.
1614 * If the double check is true, turn this into a non-upcall
1617 if ((l
->l_flag
& LP_SA_PAGEFAULT
) && sa_pagefault(l
,
1618 &sau
->sau_event
.ss_captured
.ss_ctx
) != 0) {
1619 cpu_setfunc(l2
, sa_neverrun
, NULL
);
1620 sa_putcachelwp(p
, l2
);
1621 mutex_exit(&vp
->savp_mutex
);
1622 DPRINTFN(4,("sa_switch(%d.%d) Pagefault\n",
1623 p
->p_pid
, l
->l_lid
));
1626 * WRS Not sure how vp->savp_sleeper_upcall != NULL
1627 * but be careful none the less
1629 if (vp
->savp_sleeper_upcall
== NULL
)
1630 vp
->savp_sleeper_upcall
= sau
;
1632 sadata_upcall_free(sau
);
1633 DPRINTFN(10,("sa_switch(%d.%d) page fault resolved\n",
1634 p
->p_pid
, l
->l_lid
));
1635 mutex_enter(&vp
->savp_mutex
);
1636 if (vp
->savp_faultaddr
== vp
->savp_ofaultaddr
)
1637 vp
->savp_ofaultaddr
= -1;
1638 mutex_exit(&vp
->savp_mutex
);
1642 DPRINTFN(8,("sa_switch(%d.%d) blocked upcall %d\n",
1643 p
->p_pid
, l
->l_lid
, l2
->l_lid
));
1645 l
->l_flag
|= LW_SA_BLOCKING
;
1646 vp
->savp_blocker
= l
;
1652 } else if (vp
->savp_lwp
!= NULL
) {
1655 * Case 2: We've been woken up while another LWP was
1656 * on the VP, but we're going back to sleep without
1657 * having returned to userland and delivering the
1658 * SA_UNBLOCKED upcall (select and poll cause this
1659 * kind of behavior a lot).
1664 mutex_exit(&vp
->savp_mutex
);
1666 panic("sa_vp empty");
1669 DPRINTFN(4,("sa_switch(%d.%d) switching to LWP %d.\n",
1670 p
->p_pid
, l
->l_lid
, l2
? l2
->l_lid
: 0));
1671 /* WRS need to add code to make sure we switch to l2 */
1672 mutex_exit(&vp
->savp_mutex
);
1674 DPRINTFN(4,("sa_switch(%d.%d flag %x) returned.\n",
1675 p
->p_pid
, l
->l_lid
, l
->l_flag
));
1676 KASSERT(l
->l_wchan
== 0);
1682 * Routine for threads that have never run. Calls lwp_exit.
1683 * New, never-run cache threads get pointed at this routine, which just runs
1684 * and calls lwp_exit().
1687 sa_neverrun(void *arg
)
1693 DPRINTFN(1,("sa_neverrun(%d.%d %x) exiting\n", l
->l_proc
->p_pid
,
1694 l
->l_lid
, l
->l_flag
));
1702 * We need to pass an upcall to userland. We are now
1703 * running on a spare stack and need to allocate a new
1704 * one. Also, if we are passed an sa upcall, we need to dispatch
1708 sa_switchcall(void *arg
)
1712 struct sadata_vp
*vp
;
1713 struct sadata_upcall
*sau
;
1714 struct sastack
*sast
;
1724 KASSERT(vp
->savp_lwp
== l2
);
1725 if ((l2
->l_flag
& LW_WEXIT
) || (p
->p_sflag
& (PS_WCORE
| PS_WEXIT
))) {
1727 sadata_upcall_free(sau
);
1731 KASSERT(vp
->savp_lwp
== l2
);
1732 DPRINTFN(6,("sa_switchcall(%d.%d)\n", p
->p_pid
, l2
->l_lid
));
1734 l2
->l_flag
|= LW_SA
;
1736 l2
->l_pflag
|= LP_SA_NOBLOCK
;
1738 if (vp
->savp_lwpcache_count
== 0) {
1739 /* Allocate the next cache LWP */
1740 DPRINTFN(6,("sa_switchcall(%d.%d) allocating LWP\n",
1741 p
->p_pid
, l2
->l_lid
));
1742 sa_newcachelwp(l2
, NULL
);
1746 mutex_enter(&sa
->sa_mutex
);
1747 sast
= sa_getstack(p
->p_sa
);
1748 mutex_exit(&sa
->sa_mutex
);
1749 mutex_enter(&vp
->savp_mutex
);
1750 l
= vp
->savp_blocker
;
1752 sau
->sau_stack
= sast
->sast_stack
;
1753 SIMPLEQ_INSERT_TAIL(&vp
->savp_upcalls
, sau
, sau_next
);
1754 mutex_exit(&vp
->savp_mutex
);
1756 l2
->l_flag
|= LW_SA_UPCALL
;
1760 * Oops! We're in trouble. The app hasn't
1761 * passed us in any stacks on which to deliver
1764 * WRS: I think this code is wrong. If we can't
1765 * get a stack, we are dead. We either need
1766 * to block waiting for one (assuming there's a
1767 * live vp still in userland so it can hand back
1768 * stacks, or we should just kill the process
1769 * as we're deadlocked.
1771 if (vp
->savp_sleeper_upcall
== NULL
)
1772 vp
->savp_sleeper_upcall
= sau
;
1774 sadata_upcall_free(sau
);
1776 sa_putcachelwp(p
, l2
); /* sets LW_SA */
1777 mutex_exit(&vp
->savp_mutex
);
1780 l
->l_flag
&= ~LW_SA_BLOCKING
;
1782 //mutex_enter(p->p_lock); /* XXXAD */
1784 //mutex_exit(p->p_lock);
1787 /* mostly NOTREACHED */
1795 * Ok, clear LP_SA_NOBLOCK. However it'd be VERY BAD to generate
1796 * a blocked upcall before this upcall makes it to libpthread.
1797 * So disable BLOCKED upcalls until this vp enters a syscall.
1799 l2
->l_pflag
&= ~LP_SA_NOBLOCK
;
1800 vp
->savp_pflags
|= SAVP_FLAG_DELIVERING
;
1805 * Allocate a new lwp, attach it to either the given vp or to l's vp,
1806 * and add it to its vp's idle cache.
1807 * Assumes no locks (other than kernel lock) on entry and exit.
1808 * Locks scheduler lock during operation.
1809 * Returns 0 on success or if process is exiting. Returns ENOMEM
1810 * if it is unable to allocate a new uarea.
1813 sa_newcachelwp(struct lwp
*l
, struct sadata_vp
*targ_vp
)
1817 struct sadata_vp
*vp
;
1822 if (p
->p_sflag
& (PS_WCORE
| PS_WEXIT
))
1825 uaddr
= uvm_uarea_alloc();
1826 if (__predict_false(uaddr
== 0))
1829 error
= lwp_create(l
, p
, uaddr
, 0, NULL
, 0,
1830 sa_neverrun
, NULL
, &l2
, l
->l_class
);
1832 uvm_uarea_free(uaddr
);
1836 /* We don't want this LWP on the process's main LWP list, but
1837 * newlwp helpfully puts it there. Unclear if newlwp should
1840 mutex_enter(p
->p_lock
);
1842 mutex_exit(p
->p_lock
);
1844 vp
= (targ_vp
) ? targ_vp
: l
->l_savp
;
1845 mutex_enter(&vp
->savp_mutex
);
1847 sa_putcachelwp(p
, l2
);
1848 mutex_exit(&vp
->savp_mutex
);
/*
 * sa_putcachelwp
 *	Take a normal process LWP and place it in the SA cache.
 * The LWP must not be running, or it must be our caller.
 * sadata_vp::savp_mutex is held on entry and exit.
 *
 * Previous NetBSD versions removed queued lwps from the list of
 * visible lwps.  This made ps output cleaner and hid implementation
 * details.  At present, this implementation no longer does that.
 */
1864 sa_putcachelwp(struct proc
*p
, struct lwp
*l
)
1866 struct sadata_vp
*vp
;
1870 sq
= &vp
->savp_lwpcache
;
1872 KASSERT(mutex_owned(&vp
->savp_mutex
));
1874 #if 0 /* not now, leave lwp visible to all */
1875 LIST_REMOVE(l
, l_sibling
);
1877 l
->l_prflag
|= LPR_DETACHED
;
1881 DPRINTFN(5,("sa_putcachelwp(%d.%d) Adding LWP %d to cache\n",
1882 p
->p_pid
, curlwp
->l_lid
, l
->l_lid
));
1885 * Hand-rolled call of the form:
1886 * sleepq_enter(&vp->savp_woken, l, &vp->savp_mutex);
1887 * adapted to take into account the fact that (1) l and the mutex
1888 * we want to lend it are both locked, and (2) we don't have
1891 l
->l_mutex
= &vp
->savp_mutex
;
1894 * XXXWRS: Following is a hand-rolled call of the form:
1895 * sleepq_enqueue(sq, (void *)sq, "lwpcache", sa_sobj); but
1896 * hand-done since l might not be curlwp.
1899 l
->l_syncobj
= &sa_sobj
;
1902 l
->l_wmesg
= sa_lwpcache_wmesg
;
1904 l
->l_stat
= LSSLEEP
;
1907 vp
->savp_lwpcache_count
++;
1908 sleepq_insert(sq
, l
, &sa_sobj
);
1913 * Fetch a LWP from the cache.
1914 * Called with savp_mutex held.
1917 sa_getcachelwp(struct proc
*p
, struct sadata_vp
*vp
)
1920 sleepq_t
*sq
= &vp
->savp_lwpcache
;
1922 KASSERT(mutex_owned(&vp
->savp_mutex
));
1923 KASSERT(vp
->savp_lwpcache_count
> 0);
1925 vp
->savp_lwpcache_count
--;
1929 * Now we have a hand-unrolled version of part of sleepq_remove.
1930 * The main issue is we do NOT want to make the lwp runnable yet
1931 * since we need to set up the upcall first (we know our caller(s)).
1934 TAILQ_REMOVE(sq
, l
, l_sleepchain
);
1935 l
->l_syncobj
= &sched_syncobj
;
1938 l
->l_flag
&= ~LW_SINTR
;
1940 #if 0 /* Not now, for now leave lwps in lwp list */
1941 LIST_INSERT_HEAD(&p
->p_lwps
, l
, l_sibling
);
1943 DPRINTFN(5,("sa_getcachelwp(%d.%d) Got LWP %d from cache.\n",
1944 p
->p_pid
, curlwp
->l_lid
, l
->l_lid
));
/*
 * sa_setrunning
 *	Make runnable an lwp that we pulled out of the cache with
 * sa_getcachelwp() above.  This routine and sa_getcachelwp() together
 * must perform all the work of sleepq_remove().
 */
1956 sa_setrunning(struct lwp
*l
)
1958 struct schedstate_percpu
*spc
;
1959 struct cpu_info
*ci
;
1961 KASSERT(mutex_owned(&l
->l_savp
->savp_mutex
));
1963 /* Update sleep time delta, call the wake-up handler of scheduler */
1964 l
->l_slpticksum
+= (hardclock_ticks
- l
->l_slpticks
);
1968 * Since l was on the sleep queue, we locked it
1969 * when we locked savp_mutex. Now set it running.
1970 * This is the second-part of sleepq_remove().
1972 l
->l_priority
= MAXPRI_USER
; /* XXX WRS needs thought, used to be l_usrpri */
1973 /* Look for a CPU to wake up */
1974 l
->l_cpu
= sched_takecpu(l
);
1976 spc
= &ci
->ci_schedstate
;
1979 lwp_setlock(l
, spc
->spc_mutex
);
1980 sched_setrunnable(l
);
1983 sched_enqueue(l
, true);
/*
 * sa_upcall_userret
 *	We are about to exit the kernel and return to userland, and
 * userret() noticed we have upcalls pending.  So deliver them.
 *
 *	This is the place where unblocking upcalls get generated.  We
 * allocate the stack & upcall event here.  We may block doing so, but
 * we lock our LWP state (clear LW_SA for the moment) while doing so.
 *
 *	In the case of delivering multiple upcall events, we will end up
 * writing multiple stacks out to userland at once.  The last one we send
 * out will be the first one run, and it will then notice and process
 * the others.
 *
 *	No locks are held on entry or exit; we take and release various
 * locks while processing.
 */
2004 sa_upcall_userret(struct lwp
*l
)
2009 struct sadata_vp
*vp
;
2010 struct sadata_upcall
*sau
;
2011 struct sastack
*sast
;
2019 if (vp
->savp_pflags
& SAVP_FLAG_NOUPCALLS
) {
2022 * We made upcalls in sa_yield() (otherwise we would
2023 * still be in the loop there!). Don't do it again.
2024 * Clear LW_SA_UPCALL, unless there are upcalls to deliver.
2025 * they will get delivered next time we return to user mode.
2027 vp
->savp_pflags
&= ~SAVP_FLAG_NOUPCALLS
;
2028 mutex_enter(&vp
->savp_mutex
);
2029 if ((vp
->savp_woken_count
== 0)
2030 && SIMPLEQ_EMPTY(&vp
->savp_upcalls
)) {
2033 mutex_exit(&vp
->savp_mutex
);
2036 l
->l_flag
&= ~LW_SA_UPCALL
;
2039 DPRINTFN(7,("sa_upcall_userret(%d.%d %x) skipping processing\n",
2040 p
->p_pid
, l
->l_lid
, l
->l_flag
));
2044 SA_LWP_STATE_LOCK(l
, f
);
2046 DPRINTFN(7,("sa_upcall_userret(%d.%d %x) empty %d, woken %d\n",
2047 p
->p_pid
, l
->l_lid
, l
->l_flag
, SIMPLEQ_EMPTY(&vp
->savp_upcalls
),
2048 vp
->savp_woken_count
));
2050 KASSERT((l
->l_flag
& LW_SA_BLOCKING
) == 0);
2052 mutex_enter(&vp
->savp_mutex
);
2054 if (SIMPLEQ_EMPTY(&vp
->savp_upcalls
) &&
2055 vp
->savp_woken_count
!= 0) {
2056 mutex_exit(&vp
->savp_mutex
);
2057 mutex_enter(&sa
->sa_mutex
);
2058 sast
= sa_getstack(sa
);
2059 mutex_exit(&sa
->sa_mutex
);
2062 SA_LWP_STATE_UNLOCK(l
, f
);
2067 mutex_enter(&vp
->savp_mutex
);
2069 if (SIMPLEQ_EMPTY(&vp
->savp_upcalls
) &&
2070 vp
->savp_woken_count
!= 0 && sast
!= NULL
) {
2072 * Invoke an "unblocked" upcall. We create a message
2073 * with the first unblock listed here, and then
2074 * string along a number of other unblocked stacks when
2075 * we deliver the call.
2077 l2
= TAILQ_FIRST(&vp
->savp_woken
);
2078 TAILQ_REMOVE(&vp
->savp_woken
, l2
, l_sleepchain
);
2079 vp
->savp_woken_count
--;
2080 mutex_exit(&vp
->savp_mutex
);
2082 DPRINTFN(9,("sa_upcall_userret(%d.%d) using stack %p\n",
2083 l
->l_proc
->p_pid
, l
->l_lid
, sast
->sast_stack
.ss_sp
));
2085 if ((l
->l_flag
& LW_WEXIT
)
2086 || (p
->p_sflag
& (PS_WCORE
| PS_WEXIT
))) {
2091 DPRINTFN(8,("sa_upcall_userret(%d.%d) unblocking %d\n",
2092 p
->p_pid
, l
->l_lid
, l2
->l_lid
));
2094 sau
= sadata_upcall_alloc(1);
2095 if ((l
->l_flag
& LW_WEXIT
)
2096 || (p
->p_sflag
& (PS_WCORE
| PS_WEXIT
))) {
2097 sadata_upcall_free(sau
);
2102 sa_upcall0(sau
, SA_UPCALL_UNBLOCKED
, l2
, l
, 0, NULL
, NULL
);
2103 sau
->sau_stack
= sast
->sast_stack
;
2104 mutex_enter(&vp
->savp_mutex
);
2105 SIMPLEQ_INSERT_TAIL(&vp
->savp_upcalls
, sau
, sau_next
);
2106 l2
->l_flag
&= ~LW_SA_BLOCKING
;
2108 /* Now return l2 to the cache. Mutex already set */
2109 sq
= &vp
->savp_lwpcache
;
2111 l2
->l_wmesg
= sa_lwpcache_wmesg
;
2112 vp
->savp_lwpcache_count
++;
2113 sleepq_insert(sq
, l2
, &sa_sobj
);
2116 sa_setstackfree(sast
, sa
);
2118 KASSERT(vp
->savp_lwp
== l
);
2120 while ((sau
= SIMPLEQ_FIRST(&vp
->savp_upcalls
)) != NULL
) {
2121 SIMPLEQ_REMOVE_HEAD(&vp
->savp_upcalls
, sau_next
);
2122 mutex_exit(&vp
->savp_mutex
);
2123 sa_makeupcalls(l
, sau
);
2124 mutex_enter(&vp
->savp_mutex
);
2126 mutex_exit(&vp
->savp_mutex
);
2130 if (vp
->savp_woken_count
== 0) {
2131 l
->l_flag
&= ~LW_SA_UPCALL
;
2136 SA_LWP_STATE_UNLOCK(l
, f
);
2141 #define SACOPYOUT(sae, type, kp, up) \
2142 (((sae)->sae_sacopyout != NULL) ? \
2143 (*(sae)->sae_sacopyout)((type), (kp), (void *)(up)) : \
2144 copyout((kp), (void *)(up), sizeof(*(kp))))
2148 * We're delivering the first upcall on lwp l, so
2149 * copy everything out. We assigned the stack for this upcall
2150 * when we enqueued it.
2152 * SA_LWP_STATE should be locked (LP_SA_NOBLOCK set).
2154 * If the enqueued event was DEFERRED, this is the time when we set
2155 * up the upcall event's state.
2158 sa_makeupcalls(struct lwp
*l
, struct sadata_upcall
*sau
)
2162 const struct sa_emul
*sae
;
2164 struct sadata_vp
*vp
;
2166 uintptr_t sapp
, sap
;
2167 struct sa_t self_sa
;
2168 struct sa_t
*sas
[3];
2169 struct sa_t
**ksapp
= NULL
;
2171 union sau_state
*e_ss
;
2172 ucontext_t
*kup
, *up
;
2174 int i
, nint
, nevents
, type
, error
;
2177 sae
= p
->p_emul
->e_sa
;
2180 ucsize
= sae
->sae_ucsize
;
2182 if (sau
->sau_flags
& SAU_FLAG_DEFERRED_EVENT
)
2183 sa_upcall_getstate(&sau
->sau_event
,
2184 sau
->sau_event
.ss_deferred
.ss_lwp
, 0);
2185 if (sau
->sau_flags
& SAU_FLAG_DEFERRED_INTERRUPTED
)
2186 sa_upcall_getstate(&sau
->sau_interrupted
,
2187 sau
->sau_interrupted
.ss_deferred
.ss_lwp
, 0);
2189 #ifdef __MACHINE_STACK_GROWS_UP
2190 stack
= sau
->sau_stack
.ss_sp
;
2192 stack
= (char *)sau
->sau_stack
.ss_sp
+ sau
->sau_stack
.ss_size
;
2194 stack
= STACK_ALIGN(stack
, ALIGNBYTES
);
2196 self_sa
.sa_id
= l
->l_lid
;
2197 self_sa
.sa_cpu
= vp
->savp_id
;
2201 if (sau
->sau_event
.ss_captured
.ss_sa
.sa_context
!= NULL
) {
2202 if (copyout(&sau
->sau_event
.ss_captured
.ss_ctx
,
2203 sau
->sau_event
.ss_captured
.ss_sa
.sa_context
,
2208 sas
[1] = &sau
->sau_event
.ss_captured
.ss_sa
;
2211 if (sau
->sau_interrupted
.ss_captured
.ss_sa
.sa_context
!= NULL
) {
2212 KASSERT(sau
->sau_interrupted
.ss_captured
.ss_sa
.sa_context
!=
2213 sau
->sau_event
.ss_captured
.ss_sa
.sa_context
);
2214 if (copyout(&sau
->sau_interrupted
.ss_captured
.ss_ctx
,
2215 sau
->sau_interrupted
.ss_captured
.ss_sa
.sa_context
,
2220 sas
[2] = &sau
->sau_interrupted
.ss_captured
.ss_sa
;
2224 /* For now, limit ourselves to one unblock at once. */
2225 if (sau
->sau_type
== SA_UPCALL_UNBLOCKED
) {
2226 mutex_enter(&vp
->savp_mutex
);
2227 nevents
+= vp
->savp_woken_count
;
2228 mutex_exit(&vp
->savp_mutex
);
2229 /* XXX WRS Need to limit # unblocks we copy out at once! */
2233 /* Copy out the activation's ucontext */
2234 up
= (void *)STACK_ALLOC(stack
, ucsize
);
2235 stack
= STACK_GROW(stack
, ucsize
);
2236 kup
= kmem_zalloc(sizeof(*kup
), KM_SLEEP
);
2237 KASSERT(kup
!= NULL
);
2238 kup
->uc_stack
= sau
->sau_stack
;
2239 kup
->uc_flags
= _UC_STACK
;
2240 error
= SACOPYOUT(sae
, SAOUT_UCONTEXT
, kup
, up
);
2241 kmem_free(kup
, sizeof(*kup
));
2243 sadata_upcall_free(sau
);
2247 sas
[0]->sa_context
= up
;
2249 /* Next, copy out the sa_t's and pointers to them. */
2251 sz
= (1 + nevents
+ nint
) * sae
->sae_sasize
;
2252 sap
= (uintptr_t)STACK_ALLOC(stack
, sz
);
2254 stack
= STACK_GROW(stack
, sz
);
2256 sz
= (1 + nevents
+ nint
) * sae
->sae_sapsize
;
2257 sapp
= (uintptr_t)STACK_ALLOC(stack
, sz
);
2259 stack
= STACK_GROW(stack
, sz
);
2261 if (KTRPOINT(p
, KTR_SAUPCALL
))
2262 ksapp
= kmem_alloc(sizeof(struct sa_t
*) * (nevents
+ nint
+ 1),
2267 for (i
= nevents
+ nint
; i
>= 0; i
--) {
2270 sap
-= sae
->sae_sasize
;
2271 sapp
-= sae
->sae_sapsize
;
2273 if (i
== 1 + nevents
) /* interrupted sa */
2275 else if (i
<= 1) /* self_sa and event sa */
2277 else { /* extra sas */
2278 KASSERT(sau
->sau_type
== SA_UPCALL_UNBLOCKED
);
2281 e_ss
= kmem_alloc(sizeof(*e_ss
), KM_SLEEP
);
2283 /* Lock vp and all savp_woken lwps */
2284 mutex_enter(&vp
->savp_mutex
);
2285 sq
= &vp
->savp_woken
;
2286 KASSERT(vp
->savp_woken_count
> 0);
2287 l2
= TAILQ_FIRST(sq
);
2288 KASSERT(l2
!= NULL
);
2289 TAILQ_REMOVE(sq
, l2
, l_sleepchain
);
2290 vp
->savp_woken_count
--;
2293 ("sa_makeupcalls(%d.%d) unblocking extra %d\n",
2294 p
->p_pid
, l
->l_lid
, l2
->l_lid
));
2296 * Since l2 was on savp_woken, we locked it when
2297 * we locked savp_mutex
2299 sa_upcall_getstate(e_ss
, l2
, 1);
2300 l2
->l_flag
&= ~LW_SA_BLOCKING
;
2302 /* Now return l2 to the cache. Mutex already set */
2303 sq
= &vp
->savp_lwpcache
;
2305 l2
->l_wmesg
= sa_lwpcache_wmesg
;
2306 vp
->savp_lwpcache_count
++;
2307 sleepq_insert(sq
, l2
, &sa_sobj
);
2308 mutex_exit(&vp
->savp_mutex
);
2310 error
= copyout(&e_ss
->ss_captured
.ss_ctx
,
2311 e_ss
->ss_captured
.ss_sa
.sa_context
, ucsize
);
2312 sasp
= &e_ss
->ss_captured
.ss_sa
;
2315 SACOPYOUT(sae
, SAOUT_SA_T
, sasp
, sap
) ||
2316 SACOPYOUT(sae
, SAOUT_SAP_T
, &sap
, sapp
)) {
2317 /* Copying onto the stack didn't work. Die. */
2318 sadata_upcall_free(sau
);
2320 kmem_free(e_ss
, sizeof(*e_ss
));
2324 if (KTRPOINT(p
, KTR_SAUPCALL
))
2328 kmem_free(e_ss
, sizeof(*e_ss
));
2331 /* Copy out the arg, if any */
2332 /* xxx assume alignment works out; everything so far has been
2333 * a structure, so...
2336 ap
= STACK_ALLOC(stack
, sau
->sau_argsize
);
2337 stack
= STACK_GROW(stack
, sau
->sau_argsize
);
2338 if (copyout(sau
->sau_arg
, ap
, sau
->sau_argsize
) != 0) {
2339 /* Copying onto the stack didn't work. Die. */
2340 sadata_upcall_free(sau
);
2346 stack
= STACK_ALIGN(stack
, HPPA_FRAME_SIZE
);
2349 type
= sau
->sau_type
;
2351 if (vp
->savp_sleeper_upcall
== NULL
)
2352 vp
->savp_sleeper_upcall
= sau
;
2354 sadata_upcall_free(sau
);
2356 DPRINTFN(7,("sa_makeupcalls(%d.%d): type %d\n", p
->p_pid
,
2359 if (KTRPOINT(p
, KTR_SAUPCALL
)) {
2360 ktrsaupcall(l
, type
, nevents
, nint
, (void *)sapp
, ap
, ksapp
);
2361 kmem_free(ksapp
, sizeof(struct sa_t
*) * (nevents
+ nint
+ 1));
2364 (*sae
->sae_upcall
)(l
, type
, nevents
, nint
, (void *)sapp
, ap
, stack
,
2368 l
->l_flag
&= ~LW_SA_YIELD
;
2373 if (KTRPOINT(p
, KTR_SAUPCALL
))
2374 kmem_free(ksapp
, sizeof(struct sa_t
) * (nevents
+ nint
+ 1));
/*
 * sa_unblock_userret:
 *
 *	Our lwp is in the process of returning to userland, and
 * userret noticed LW_SA_BLOCKING is set for us.  This indicates that
 * we were at one time the blessed lwp for our vp and we blocked.
 * An upcall was delivered to our process indicating that we blocked.
 *
 *	Since then, we have unblocked in the kernel and proceeded
 * to finish whatever work needed to be done.  For instance, pages
 * have been faulted in for a trap, or results have been saved out
 * for a system call.
 *
 *	We now need to do two things simultaneously.  First, we have to
 * cause an UNBLOCKED upcall to be generated.  Second, we actually
 * have to STOP executing: when the blocked upcall was generated, a
 * new lwp was given to our application, so if we simply returned
 * we would exceed our concurrency.
 *
 *	So we put ourselves on our vp's savp_woken list and take
 * steps to make sure the blessed lwp will notice us.  Note: we maintain
 * loose concurrency controls, so the blessed lwp for our vp could in
 * fact be running on another cpu in the system.
 */
2401 sa_unblock_userret(struct lwp
*l
)
2403 struct lwp
*l2
, *vp_lwp
;
2406 struct sadata_vp
*vp
;
2412 if ((l
->l_flag
& LW_WEXIT
) || (p
->p_sflag
& (PS_WCORE
| PS_WEXIT
)))
2415 if ((l
->l_flag
& LW_SA_BLOCKING
) == 0)
2418 DPRINTFN(7,("sa_unblock_userret(%d.%d %x) \n", p
->p_pid
, l
->l_lid
,
2424 vp_lwp
= vp
->savp_lwp
;
2427 KASSERT(vp_lwp
!= NULL
);
2428 DPRINTFN(3,("sa_unblock_userret(%d.%d) woken, flags %x, vp %d\n",
2429 l
->l_proc
->p_pid
, l
->l_lid
, l
->l_flag
,
2433 if (vp_lwp
->l_flag
& LW_SA_IDLE
) {
2434 KASSERT((vp_lwp
->l_flag
& LW_SA_UPCALL
) == 0);
2435 KASSERT(vp
->savp_wokenq_head
== NULL
);
2437 ("sa_unblock_userret(%d.%d) repossess: idle vp_lwp %d state %d\n",
2438 l
->l_proc
->p_pid
, l
->l_lid
,
2439 vp_lwp
->l_lid
, vp_lwp
->l_stat
));
2440 vp_lwp
->l_flag
&= ~LW_SA_IDLE
;
2446 "sa_unblock_userret(%d.%d) put on wokenq: vp_lwp %d state %d flags %x\n",
2447 l
->l_proc
->p_pid
, l
->l_lid
, vp_lwp
->l_lid
,
2448 vp_lwp
->l_stat
, vp_lwp
->l_flag
));
2452 if (!mutex_tryenter(&vp
->savp_mutex
)) {
2454 mutex_enter(&vp
->savp_mutex
);
2455 /* savp_lwp may have changed. We'll be ok even if it did */
2456 vp_lwp
= vp
->savp_lwp
;
2461 switch (vp_lwp
->l_stat
) {
2463 if (vp_lwp
->l_flag
& LW_SA_UPCALL
)
2465 vp_lwp
->l_flag
|= LW_SA_UPCALL
;
2466 if (vp_lwp
->l_flag
& LW_SA_YIELD
)
2468 spc_lock(vp_lwp
->l_cpu
);
2469 cpu_need_resched(vp_lwp
->l_cpu
, RESCHED_IMMED
);
2470 spc_unlock(vp_lwp
->l_cpu
);
2473 if (vp_lwp
->l_flag
& LW_SA_IDLE
) {
2474 vp_lwp
->l_flag
&= ~(LW_SA_IDLE
|LW_SA_YIELD
|LW_SINTR
);
2475 vp_lwp
->l_flag
|= LW_SA_UPCALL
;
2476 /* lwp_unsleep() will unlock the LWP */
2477 lwp_unsleep(vp_lwp
, true);
2479 "sa_unblock_userret(%d.%d) woke vp: %d state %d\n",
2480 l
->l_proc
->p_pid
, l
->l_lid
, vp_lwp
->l_lid
,
2485 vp_lwp
->l_flag
|= LW_SA_UPCALL
;
2490 vp_lwp
->l_flag
|= LW_SA_UPCALL
;
2493 if (vp_lwp
->l_flag
& LW_SA_UPCALL
)
2495 vp_lwp
->l_flag
|= LW_SA_UPCALL
;
2496 if (vp_lwp
->l_flag
& LW_SA_YIELD
)
2499 if (vp_lwp
->l_slptime
> 1) {
2500 void updatepri(struct lwp
*);
2504 vp_lwp
->l_slptime
= 0;
2505 if (vp_lwp
->l_cpu
== curcpu())
2509 * don't need to spc_lock the other cpu
2510 * as runable lwps have the cpu as their
2513 /* spc_lock(vp_lwp->l_cpu); */
2514 cpu_need_resched(vp_lwp
->l_cpu
, 0);
2515 /* spc_unlock(vp_lwp->l_cpu); */
2519 panic("sa_vp LWP not sleeping/onproc/runnable");
2526 * Add ourselves to the savp_woken queue. Still on p_lwps.
2528 * We now don't unlock savp_mutex since it now is l's mutex,
2529 * and it will be released in mi_switch().
2531 sleepq_enter(&vp
->savp_woken
, l
, &vp
->savp_mutex
);
2532 sleepq_enqueue(&vp
->savp_woken
, &vp
->savp_woken
, sa_lwpwoken_wmesg
,
2534 vp
->savp_woken_count
++;
2535 //l->l_stat = LSSUSPENDED;
2539 * We suspended ourself and put ourself on the savp_woken
2540 * list. The only way we come back from mi_switch() to this
2541 * routine is if we were put back on the run queues, which only
2542 * happens if the process is exiting. So just exit.
2544 * In the normal lwp lifecycle, cpu_setfunc() will make this lwp
2545 * run in a different routine by the time we next run.
int debug_print_sa(struct proc *);
int debug_print_proc(int);
2558 debug_print_proc(int pid
)
2564 printf("No process %d\n", pid
);
2572 debug_print_sa(struct proc
*p
)
2575 struct sadata_vp
*vp
;
2577 printf("Process %d (%s), state %d, address %p, flags %x\n",
2578 p
->p_pid
, p
->p_comm
, p
->p_stat
, p
, p
->p_sflag
);
2579 printf("LWPs: %d (%d running, %d zombies)\n", p
->p_nlwps
, p
->p_nrlwps
,
2583 SLIST_FOREACH(vp
, &sa
->sa_vps
, savp_next
) {
2585 printf("SA VP: %d %s\n", vp
->savp_lwp
->l_lid
,
2586 vp
->savp_lwp
->l_flag
& LW_SA_YIELD
?
2587 (vp
->savp_lwp
->l_flag
& LW_SA_IDLE
?
2588 "idle" : "yielding") : "");
2589 printf("SAs: %d cached LWPs\n",
2590 vp
->savp_lwpcache_count
);
2591 printf("SAs: %d woken LWPs\n",
2592 vp
->savp_woken_count
);
#endif /* KERN_SA */