/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
27 #pragma ident "%Z%%M% %I% %E% SMI"
/*
 * Routines to support shuttle synchronization objects
 */
#include <sys/types.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/class.h>
#include <sys/debug.h>
#include <sys/sobject.h>
#include <sys/cpuvar.h>
#include <sys/schedctl.h>
#include <sys/sdt.h>
43 static disp_lock_t shuttle_lock
; /* lock on shuttle objects */
46 * Place the thread in question on the run q.
49 shuttle_unsleep(kthread_t
*t
)
51 ASSERT(THREAD_LOCK_HELD(t
));
53 /* Waiting on a shuttle */
54 ASSERT(t
->t_wchan0
== (caddr_t
)1 && t
->t_wchan
== NULL
);
55 t
->t_flag
&= ~T_WAKEABLE
;
70 shuttle_change_pri(kthread_t
*t
, pri_t p
, pri_t
*t_prip
)
72 ASSERT(THREAD_LOCK_HELD(t
));
76 static sobj_ops_t shuttle_sobj_ops
= {
77 SOBJ_SHUTTLE
, shuttle_owner
, shuttle_unsleep
, shuttle_change_pri
81 * Mark the current thread as sleeping on a shuttle object, and
82 * resume the specified thread. The 't' thread must be marked as ONPROC.
84 * No locks other than 'l' should be held at this point.
87 shuttle_resume(kthread_t
*t
, kmutex_t
*l
)
89 klwp_t
*lwp
= ttolwp(curthread
);
93 thread_lock(curthread
);
94 disp_lock_enter_high(&shuttle_lock
);
96 lwp
->lwp_asleep
= 1; /* /proc */
97 lwp
->lwp_sysabort
= 0; /* /proc */
100 curthread
->t_flag
|= T_WAKEABLE
;
101 curthread
->t_sobj_ops
= &shuttle_sobj_ops
;
103 * setting cpu_dispthread before changing thread state
104 * so that kernel preemption will be deferred to after swtch_to()
107 cp
->cpu_dispthread
= t
;
108 cp
->cpu_dispatch_pri
= DISP_PRIO(t
);
110 * Set the wchan0 field so that /proc won't just do a setrun
111 * on this thread when trying to stop a process. Instead,
112 * /proc will mark the thread as VSTOPPED similar to threads
113 * that are blocked on user level condition variables.
115 curthread
->t_wchan0
= (caddr_t
)1;
116 CL_INACTIVE(curthread
);
117 DTRACE_SCHED1(wakeup
, kthread_t
*, t
);
119 THREAD_SLEEP(curthread
, &shuttle_lock
);
120 disp_lock_exit_high(&shuttle_lock
);
123 * Update ustate records (there is no waitrq obviously)
125 (void) new_mstate(curthread
, LMS_SLEEP
);
130 t
->t_flag
&= ~T_WAKEABLE
;
132 t
->t_sobj_ops
= NULL
;
135 * Make sure we end up on the right CPU if we are dealing with bound
136 * CPU's or processor partitions.
138 if (t
->t_bound_cpu
!= NULL
|| t
->t_cpupart
!= cp
->cpu_part
) {
144 * We re-assign t_disp_queue and t_lockp of 't' here because
145 * 't' could have been preempted.
147 if (t
->t_disp_queue
!= cp
->cpu_disp
) {
148 t
->t_disp_queue
= cp
->cpu_disp
;
149 thread_onproc(t
, cp
);
153 * We can't call thread_unlock_high() here because t's thread lock
154 * could have changed by thread_onproc() call above to point to
155 * CPU->cpu_thread_lock.
157 disp_lock_exit_high(oldtlp
);
161 * Make sure we didn't receive any important events while
164 if (lwp
&& (ISSIG(curthread
, JUSTLOOKING
) ||
165 MUSTRETURN(curproc
, curthread
) || schedctl_cancel_pending()))
170 * Caller must check for ISSIG/lwp_sysabort conditions
171 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
176 * Mark the current thread as sleeping on a shuttle object, and
177 * switch to a new thread.
178 * No locks other than 'l' should be held at this point.
181 shuttle_swtch(kmutex_t
*l
)
183 klwp_t
*lwp
= ttolwp(curthread
);
185 thread_lock(curthread
);
186 disp_lock_enter_high(&shuttle_lock
);
187 lwp
->lwp_asleep
= 1; /* /proc */
188 lwp
->lwp_sysabort
= 0; /* /proc */
190 curthread
->t_flag
|= T_WAKEABLE
;
191 curthread
->t_sobj_ops
= &shuttle_sobj_ops
;
192 curthread
->t_wchan0
= (caddr_t
)1;
193 CL_INACTIVE(curthread
);
195 THREAD_SLEEP(curthread
, &shuttle_lock
);
196 (void) new_mstate(curthread
, LMS_SLEEP
);
197 disp_lock_exit_high(&shuttle_lock
);
199 if (ISSIG(curthread
, JUSTLOOKING
) ||
200 MUSTRETURN(curproc
, curthread
) || schedctl_cancel_pending())
204 * Caller must check for ISSIG/lwp_sysabort conditions
205 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
210 * Mark the specified thread as once again sleeping on a shuttle object. This
211 * routine is called to put a server thread -- one that was dequeued but for
212 * which shuttle_resume() was _not_ called -- back to sleep on a shuttle
213 * object. Because we don't hit the sched:::wakeup DTrace probe until
214 * shuttle_resume(), we do _not_ have a sched:::sleep probe here.
217 shuttle_sleep(kthread_t
*t
)
219 klwp_t
*lwp
= ttolwp(t
);
220 proc_t
*p
= ttoproc(t
);
223 disp_lock_enter_high(&shuttle_lock
);
225 lwp
->lwp_asleep
= 1; /* /proc */
226 lwp
->lwp_sysabort
= 0; /* /proc */
229 t
->t_flag
|= T_WAKEABLE
;
230 t
->t_sobj_ops
= &shuttle_sobj_ops
;
231 t
->t_wchan0
= (caddr_t
)1;
233 ASSERT(t
->t_mstate
== LMS_SLEEP
);
234 THREAD_SLEEP(t
, &shuttle_lock
);
235 disp_lock_exit_high(&shuttle_lock
);
236 if (lwp
&& (ISSIG(t
, JUSTLOOKING
) || MUSTRETURN(p
, t
)))