/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2001
 *
 * API for invoking Haskell functions via the RTS
 *
 * --------------------------------------------------------------------------*/

#include "rts/PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Prelude.h"
#include "Schedule.h"
#include "Capability.h"
#include "StableName.h"
#include "StablePtr.h"
#include "Threads.h"
#include "sm/NonMoving.h"

/* ----------------------------------------------------------------------------
   Building Haskell objects from C datatypes.
   ------------------------------------------------------------------------- */

HaskellObj
rts_mkChar (Capability *cap, HsChar c)
{
    StgClosure *p;
    // See Note [Precomputed static closures]
    if (c <= MAX_CHARLIKE) {
        p = (StgClosure *)CHARLIKE_CLOSURE(c);
    } else {
        p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
        SET_HDR(p, Czh_con_info, CCS_SYSTEM);
        p->payload[0] = (StgClosure *)(StgWord)(StgChar)c;
    }
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkInt (Capability *cap, HsInt i)
{
    StgClosure *p;
    // See Note [Precomputed static closures]
    if (i >= MIN_INTLIKE && i <= MAX_INTLIKE) {
        p = (StgClosure *)INTLIKE_CLOSURE(i);
    } else {
        p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
        SET_HDR(p, Izh_con_info, CCS_SYSTEM);
        *(StgInt *)p->payload = i;
    }
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkInt8 (Capability *cap, HsInt8 i)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
    *(StgInt8 *)p->payload = i;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkInt16 (Capability *cap, HsInt16 i)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
    *(StgInt16 *)p->payload = i;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkInt32 (Capability *cap, HsInt32 i)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
    *(StgInt32 *)p->payload = i;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0, sizeofW(StgInt64)));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
    ASSIGN_Int64((P_)&(p->payload[0]), i);
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkWord (Capability *cap, HsWord i)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
    *(StgWord *)p->payload = i;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkWord8 (Capability *cap, HsWord8 w)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
    *(StgWord8 *)p->payload = w;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkWord16 (Capability *cap, HsWord16 w)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
    *(StgWord16 *)p->payload = w;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkWord32 (Capability *cap, HsWord32 w)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
    *(StgWord32 *)p->payload = w;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkWord64 (Capability *cap, HsWord64 w)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0, sizeofW(StgWord64)));
    SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
    ASSIGN_Word64((P_)&(p->payload[0]), w);
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkFloat (Capability *cap, HsFloat f)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
    ASSIGN_FLT((P_)p->payload, (StgFloat)f);
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkDouble (Capability *cap, HsDouble d)
{
    StgClosure *p = (StgClosure *)allocate(cap, CONSTR_sizeW(0, sizeofW(StgDouble)));
    SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
    ASSIGN_DBL((P_)p->payload, (StgDouble)d);
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkStablePtr (Capability *cap, HsStablePtr s)
{
    StgClosure *p = (StgClosure *)allocate(cap, sizeofW(StgHeader)+1);
    SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)s;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkPtr (Capability *cap, HsPtr a)
{
    StgClosure *p = (StgClosure *)allocate(cap, sizeofW(StgHeader)+1);
    SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkFunPtr (Capability *cap, HsFunPtr a)
{
    StgClosure *p = (StgClosure *)allocate(cap, sizeofW(StgHeader)+1);
    SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return TAG_CLOSURE(1, p);
}

HaskellObj
rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
{
    if (b) {
        return TAG_CLOSURE(2, (StgClosure *)True_closure);
    } else {
        return TAG_CLOSURE(1, (StgClosure *)False_closure);
    }
}

HaskellObj
rts_mkString (Capability *cap, char *s)
{
    return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
}

HaskellObj
rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
{
    StgThunk *ap;

    ap = (StgThunk *)allocate(cap, sizeofW(StgThunk) + 2);
    // Here we don't want to use CCS_SYSTEM, because it's a hidden cost centre,
    // and evaluating Haskell code under a hidden cost centre leads to
    // confusing profiling output. (#7753)
    SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_MAIN);
    ap->payload[0] = f;
    ap->payload[1] = arg;
    return (StgClosure *)ap;
}
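
/* Example (illustrative only, not part of the RTS): rts_apply builds one
 * application thunk at a time, so a saturated call such as `f x y` is built by
 * nesting.  Assuming `f`, `x` and `y` are HaskellObj values already built by
 * the caller:
 *
 *     HaskellObj app = rts_apply(cap, rts_apply(cap, f, x), y);
 *
 * Nothing is evaluated here; the thunk is only forced when it is handed to one
 * of the rts_eval* functions below.
 */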

/* ----------------------------------------------------------------------------
   Deconstructing Haskell objects

   We would like to assert that we have the right kind of object in
   each case, but this is problematic because in GHCi the info table
   for the D# constructor (say) might be dynamically loaded.  Hence we
   omit these assertions for now.
   ------------------------------------------------------------------------- */

HsChar
rts_getChar (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Czh_con_info ||
    //        p->header.info == Czh_static_info);
    return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt
rts_getInt (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Izh_con_info ||
    //        p->header.info == Izh_static_info);
    return *(HsInt *)(UNTAG_CLOSURE(p)->payload);
}

HsInt8
rts_getInt8 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I8zh_con_info ||
    //        p->header.info == I8zh_static_info);
    return *(HsInt8 *)(UNTAG_CLOSURE(p)->payload);
}

HsInt16
rts_getInt16 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I16zh_con_info ||
    //        p->header.info == I16zh_static_info);
    return *(HsInt16 *)(UNTAG_CLOSURE(p)->payload);
}

HsInt32
rts_getInt32 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I32zh_con_info ||
    //        p->header.info == I32zh_static_info);
    return *(HsInt32 *)(UNTAG_CLOSURE(p)->payload);
}

HsInt64
rts_getInt64 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I64zh_con_info ||
    //        p->header.info == I64zh_static_info);
    return PK_Int64((P_)&(UNTAG_CLOSURE(p)->payload[0]));
}

HsWord
rts_getWord (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Wzh_con_info ||
    //        p->header.info == Wzh_static_info);
    return *(HsWord *)(UNTAG_CLOSURE(p)->payload);
}

HsWord8
rts_getWord8 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W8zh_con_info ||
    //        p->header.info == W8zh_static_info);
    return *(HsWord8 *)(UNTAG_CLOSURE(p)->payload);
}

HsWord16
rts_getWord16 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W16zh_con_info ||
    //        p->header.info == W16zh_static_info);
    return *(HsWord16 *)(UNTAG_CLOSURE(p)->payload);
}

HsWord32
rts_getWord32 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W32zh_con_info ||
    //        p->header.info == W32zh_static_info);
    return *(HsWord32 *)(UNTAG_CLOSURE(p)->payload);
}

HsWord64
rts_getWord64 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    return PK_Word64((P_)&(UNTAG_CLOSURE(p)->payload[0]));
}

HsFloat
rts_getFloat (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Fzh_con_info ||
    //        p->header.info == Fzh_static_info);
    return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
}

HsDouble
rts_getDouble (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Dzh_con_info ||
    //        p->header.info == Dzh_static_info);
    return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
}

HsStablePtr
rts_getStablePtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == StablePtr_con_info ||
    //        p->header.info == StablePtr_static_info);
    return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
}

HsPtr
rts_getPtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Ptr_con_info ||
    //        p->header.info == Ptr_static_info);
    return (void *)(UNTAG_CLOSURE(p)->payload[0]);
}

HsFunPtr
rts_getFunPtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == FunPtr_con_info ||
    //        p->header.info == FunPtr_static_info);
    return (void *)(UNTAG_CLOSURE(p)->payload[0]);
}

HsBool
rts_getBool (HaskellObj p)
{
    const StgWord tag = GET_CLOSURE_TAG(p);
    if (tag > 0) {
        // False is tagged 1, True is tagged 2
        return tag - 1;
    }

    const StgInfoTable *info;

    info = get_itbl((const StgClosure *)UNTAG_CONST_CLOSURE(p));
    if (info->srt == 0) { // srt is the constructor tag
        return 0;
    } else {
        return 1;
    }
}
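
/* Example (illustrative): the rts_get* functions read back a result written by
 * the rts_eval* family.  Assuming `action` is a HaskellObj for an IO Int
 * computation built by the caller:
 *
 *     HaskellObj ret;
 *     rts_evalIO(&cap, action, &ret);
 *     rts_checkSchedStatus("myCall", cap);
 *     HsInt n = rts_getInt(ret);
 *
 * The status check matters: `ret` is only meaningful when the scheduler
 * reports Success.
 */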

/* -----------------------------------------------------------------------------
   Creating threads
   -------------------------------------------------------------------------- */

INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
    tso->stackobj->sp--;
    tso->stackobj->sp[0] = (W_) c;
}

StgTSO *
createGenThread (Capability *cap, W_ stack_size, StgClosure *closure)
{
    StgTSO *t;
    t = createThread (cap, stack_size);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}

StgTSO *
createIOThread (Capability *cap, W_ stack_size, StgClosure *closure)
{
    StgTSO *t;
    t = createThread (cap, stack_size);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}

/*
 * Same as above, but also evaluate the result of the IO action
 * to whnf while we're at it.
 */
StgTSO *
createStrictIOThread(Capability *cap, W_ stack_size, StgClosure *closure)
{
    StgTSO *t;
    t = createThread(cap, stack_size);
    pushClosure(t, (W_)&stg_forceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}
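
/* A sketch (illustrative) of the stack that createStrictIOThread leaves
 * behind, top of stack first:
 *
 *     stg_enter_info    -- enter `closure`, the IO action
 *     closure
 *     stg_ap_v_info     -- apply the result to the void/RealWorld argument
 *     stg_forceIO_info  -- force the action's result to WHNF
 *
 * createIOThread builds the same stack without the stg_forceIO frame, and
 * createGenThread pushes only the closure and stg_enter.
 */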

/* ----------------------------------------------------------------------------
   Evaluating Haskell expressions

   The running task (capability->running_task) must be a bound task, i.e. you
   must call newBoundTask() before calling these functions. Note that
   rts_lock() and rts_pause() both call newBoundTask().
   ------------------------------------------------------------------------- */
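
/* Example (illustrative only): a typical embedding sequence from C, after the
 * RTS has been initialised with hs_init().  `foo_closure` stands for the
 * closure symbol of a hypothetical exported Haskell value foo :: Int -> IO Int:
 *
 *     extern StgClosure foo_closure;
 *
 *     Capability *cap = rts_lock();   // binds the current task
 *     HaskellObj ret;
 *     rts_evalIO(&cap,
 *                rts_apply(cap, (HaskellObj)&foo_closure, rts_mkInt(cap, 42)),
 *                &ret);
 *     rts_checkSchedStatus("foo", cap);
 *     HsInt result = rts_getInt(ret);
 *     rts_unlock(cap);
 */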

void rts_eval (/* inout */ Capability **cap,
               /* in    */ HaskellObj p,
               /* out */   HaskellObj *ret)
{
    StgTSO *tso;

    tso = createGenThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
    scheduleWaitThread(tso, ret, cap);
}

void rts_eval_ (/* inout */ Capability **cap,
                /* in    */ HaskellObj p,
                /* in    */ unsigned int stack_size,
                /* out */   HaskellObj *ret)
{
    StgTSO *tso;

    tso = createGenThread(*cap, stack_size, p);
    scheduleWaitThread(tso, ret, cap);
}

/*
 * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
 * result to WHNF before returning.
 */
void rts_evalIO (/* inout */ Capability **cap,
                 /* in    */ HaskellObj p,
                 /* out */   HaskellObj *ret)
{
    StgTSO *tso;

    tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
    scheduleWaitThread(tso, ret, cap);
}

/*
 * rts_inCall() is similar to rts_evalIO, but expects to be called as an incall,
 * and is not expected to be called by user code directly.
 */
void rts_inCall (/* inout */ Capability **cap,
                 /* in    */ HaskellObj p,
                 /* out */   HaskellObj *ret)
{
    StgTSO *tso;

    tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
    if ((*cap)->running_task->preferred_capability != -1) {
        // enabled_capabilities should not change between here and waitCapability()
        ASSERT((*cap)->no == ((*cap)->running_task->preferred_capability % enabled_capabilities));
        // we requested explicit affinity; don't move this thread from now on.
        tso->flags |= TSO_LOCKED;
    }
    scheduleWaitThread(tso, ret, cap);
}

/*
 * rts_evalStableIOMain() is suitable for running the main Haskell thread
 * stored in a (StablePtr (IO a)): it behaves like rts_evalStableIO but first
 * wraps the action in GHC.TopHandler.runMainIO, which installs the top-level
 * handlers.
 */
void rts_evalStableIOMain(/* inout */ Capability **cap,
                          /* in    */ HsStablePtr s,
                          /* out */   HsStablePtr *ret)
{
    StgTSO *tso;
    StgClosure *p, *r, *w;
    SchedulerStatus stat;

    p = (StgClosure *)deRefStablePtr(s);
    w = rts_apply(*cap, &ghczminternal_GHCziInternalziTopHandler_runMainIO_closure, p);
    tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, w);
    // async exceptions are always blocked by default in the created
    // thread.  See #1048.
    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
    scheduleWaitThread(tso, &r, cap);
    stat = rts_getSchedStatus(*cap);

    if (stat == Success && ret != NULL) {
        *ret = getStablePtr((StgPtr)r);
    }
}

/*
 * rts_evalStableIO() is suitable for calling from Haskell.  It
 * evaluates a value of the form (StablePtr (IO a)), forcing the
 * action's result to WHNF before returning.  The result is returned
 * in a StablePtr.
 */
void rts_evalStableIO (/* inout */ Capability **cap,
                       /* in    */ HsStablePtr s,
                       /* out */   HsStablePtr *ret)
{
    StgTSO *tso;
    StgClosure *p, *r;
    SchedulerStatus stat;

    p = (StgClosure *)deRefStablePtr(s);
    tso = createStrictIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
    // async exceptions are always blocked by default in the created
    // thread.  See #1048.
    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
    scheduleWaitThread(tso, &r, cap);
    stat = rts_getSchedStatus(*cap);

    if (stat == Success && ret != NULL) {
        *ret = getStablePtr((StgPtr)r);
    }
}

/*
 * Like rts_evalIO(), but doesn't force the action's result.
 */
void rts_evalLazyIO (/* inout */ Capability **cap,
                     /* in    */ HaskellObj p,
                     /* out */   HaskellObj *ret)
{
    StgTSO *tso;

    tso = createIOThread(*cap, RtsFlags.GcFlags.initialStkSize, p);
    scheduleWaitThread(tso, ret, cap);
}

void rts_evalLazyIO_ (/* inout */ Capability **cap,
                      /* in    */ HaskellObj p,
                      /* in    */ unsigned int stack_size,
                      /* out */   HaskellObj *ret)
{
    StgTSO *tso;

    tso = createIOThread(*cap, stack_size, p);
    scheduleWaitThread(tso, ret, cap);
}

/* Convenience function for decoding the returned status. */

void
rts_checkSchedStatus (char* site, Capability *cap)
{
    SchedulerStatus rc = cap->running_task->incall->rstat;
    switch (rc) {
    case Success:
        return;
    case Killed:
        errorBelch("%s: uncaught exception", site);
        stg_exit(EXIT_FAILURE);
    case Interrupted:
        errorBelch("%s: interrupted", site);
#if defined(THREADED_RTS)
        // The RTS is shutting down, and the process will probably
        // soon exit. We don't want to preempt the shutdown
        // by exiting the whole process here, so we just terminate the
        // current thread. Don't forget to release the cap first though.
        rts_unlock(cap);
        shutdownThread();
#else
        stg_exit(EXIT_FAILURE);
#endif
    default:
        errorBelch("%s: Return code (%d) not ok", (site), (rc));
        stg_exit(EXIT_FAILURE);
    }
}

SchedulerStatus
rts_getSchedStatus (Capability *cap)
{
    return cap->running_task->incall->rstat;
}

#if defined(THREADED_RTS)
// The task that paused the RTS. The rts_pausing_task variable is owned by the
// task that owns all capabilities (there is at most one such task).
//
// It's possible to remove this and instead define the pausing task as whichever
// task owns all capabilities, but using `rts_pausing_task` leads to marginally
// cleaner code/API and better error messages.
Task *rts_pausing_task = NULL;
#endif

Capability *
rts_lock (void)
{
    Capability *cap;
    Task *task;

    // Bound the current task. This is necessary to support rts_eval* functions.
    task = newBoundTask();

    if (task->running_finalizers) {
        errorBelch("error: a C finalizer called back into Haskell.\n"
                   "   This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
                   "   To create finalizers that may call back into Haskell, use\n"
                   "   Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
        stg_exit(EXIT_FAILURE);
    }

#if defined(THREADED_RTS)
    if (rts_pausing_task == task) {
        errorBelch("error: rts_lock: The RTS is already paused by this thread.\n"
                   "   There is no need to call rts_lock if you have already called rts_pause.");
        stg_exit(EXIT_FAILURE);
    }
#endif

    cap = NULL;
    waitForCapability(&cap, task);

    if (task->incall->prev_stack == NULL) {
        // This is a new outermost call from C into Haskell land.
        // Until the corresponding call to rts_unlock, this task
        // is doing work on behalf of the RTS.
        traceTaskCreate(task, cap);
    }

    return (Capability *)cap;
}

// Exiting the RTS: we hold a Capability that is not necessarily the
// same one that was originally returned by rts_lock(), because
// rts_evalIO() etc. may return a new one.  Now that we have
// investigated the return value, we can release the Capability,
// and free the Task (in that order).

void
rts_unlock (Capability *cap)
{
    Task *task;

    task = cap->running_task;
    ASSERT_FULL_CAPABILITY_INVARIANTS(cap, task);

    // Now release the Capability. With the capability released, GC
    // may happen. NB. does not try to put the current Task on the
    // worker queue.
    // NB. keep cap->lock held while we call exitMyTask(). This
    // is necessary during shutdown, where we want the invariant that
    // after shutdownCapability(), all the Tasks associated with the
    // Capability have completed their shutdown too. Otherwise we
    // could have exitMyTask()/workerTaskStop() running at some
    // random point in the future, which causes problems for
    // freeTaskManager().
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap, false);

    // Finally, we can release the Task to the free list.
    exitMyTask();
    RELEASE_LOCK(&cap->lock);

    if (task->incall == NULL) {
        // This is the end of an outermost call from C into Haskell land.
        // From here on, the task goes back to C land and we should not count
        // it as doing work on behalf of the RTS.
        traceTaskDelete(task);
    }
}

struct PauseToken_ {
    Capability *capability;
};

Capability *pauseTokenCapability(PauseToken *pauseToken) {
    return pauseToken->capability;
}

#if defined(THREADED_RTS)

// See Note [Locking and Pausing the RTS]
PauseToken *rts_pause (void)
{
    // Wait for any nonmoving collection to finish before pausing the RTS.
    // The nonmoving collector needs to synchronise with the mutator,
    // so pausing the mutator while a collection is ongoing might lead to deadlock or
    // capabilities being prematurely re-awoken.
    if (RtsFlags.GcFlags.useNonmoving) {
        nonmovingBlockConcurrentMark(true);
    }

    // It is an error if this thread already paused the RTS. If another
    // thread has paused the RTS, then rts_pause will block until rts_resume is
    // called (and compete with other threads calling rts_pause). The blocking
    // behavior is implied by the use of `stopAllCapabilities`.
    Task *task = getMyTask();
    if (rts_pausing_task == task)
    {
        // This task has already paused the RTS.
        errorBelch("error: rts_pause: This thread has already paused the RTS.");
        stg_exit(EXIT_FAILURE);
    }

    // The current task must not own a capability. This is true for non-worker
    // threads e.g. when making a safe FFI call. We allow pausing when
    // `task->cap->running_task != task` because the capability can be taken by
    // other capabilities. Doing this check is justified because rts_pause is a
    // user facing function and we want good error reporting. We also don't
    // expect rts_pause to be performance critical.
    //
    // N.B. we use a relaxed load since there is no easy way to synchronize
    // here and this check is ultimately just a convenience for the user.
    if (task->cap && RELAXED_LOAD(&task->cap->running_task) == task)
    {
        // This task owns a capability (and it can't be taken by other capabilities).
        errorBelch(task->cap->in_haskell
            ? ("error: rts_pause: attempting to pause via an unsafe FFI call.\n"
               "   Perhaps a 'foreign import unsafe' should be 'safe'?")
            : ("error: rts_pause: attempting to pause from a Task that owns a capability.\n"
               "   Have you already acquired a capability e.g. with rts_lock?"));
        stg_exit(EXIT_FAILURE);
    }

    // Bound the current task. This is necessary to support rts_eval* functions.
    task = newBoundTask();
    stopAllCapabilities(NULL, task);

    // Now we own all capabilities so we own rts_pausing_task and may set it.
    rts_pausing_task = task;

    PauseToken *token = stgMallocBytes(sizeof(PauseToken), "rts_pause");
    token->capability = task->cap;
    return token;
}

static void assert_isPausedOnMyTask(const char *functionName);

// See Note [Locking and Pausing the RTS]. The pauseToken argument is here just
// for symmetry with rts_pause and to match the pattern of rts_lock/rts_unlock.
void rts_resume (PauseToken *pauseToken)
{
    assert_isPausedOnMyTask("rts_resume");
    Task *task = getMyTask();

    // Now we own all capabilities so we own rts_pausing_task and may write to
    // it.
    rts_pausing_task = NULL;

    // releaseAllCapabilities will not block because the current task owns all
    // capabilities.
    releaseAllCapabilities(getNumCapabilities(), NULL, task);
    exitMyTask();
    stgFree(pauseToken);

    if (RtsFlags.GcFlags.useNonmoving) {
        nonmovingUnblockConcurrentMark();
    }
}

bool rts_isPaused(void)
{
    return rts_pausing_task != NULL;
}

// Check that rts_pause was called on this thread/task and that this thread
// owns all capabilities. If not, outputs an error and exits with EXIT_FAILURE.
static void assert_isPausedOnMyTask(const char *functionName)
{
    Task *task = getMyTask();
    if (rts_pausing_task == NULL)
    {
        errorBelch(
            "error: %s: the rts is not paused. Did you forget to call rts_pause?",
            functionName);
        stg_exit(EXIT_FAILURE);
    }

    if (task != rts_pausing_task)
    {
        // We don't have ownership of rts_pausing_task, so it may have changed
        // just after the above read. Still, we are guaranteed that
        // rts_pausing_task won't be set to the current task (because the
        // current task is here now!), so the error messages are still correct.
        errorBelch(
            "error: %s: called from a different OS thread than rts_pause.",
            functionName);
        stg_exit(EXIT_FAILURE);
    }

    // Check that we own all capabilities.
    for (unsigned int i = 0; i < getNumCapabilities(); i++)
    {
        Capability *cap = getCapability(i);
        if (cap->running_task != task)
        {
            errorBelch(
                "error: %s: the pausing thread does not own all capabilities.\n"
                "   Have you manually released a capability after calling rts_pause?",
                functionName);
            stg_exit(EXIT_FAILURE);
        }
    }
}

void rts_listThreads(ListThreadsCb cb, void *user)
{
    assert_isPausedOnMyTask("rts_listThreads");

    // The rts is paused and can only be resumed by the current thread. Hence it
    // is safe to read global thread data.

    for (uint32_t g = 0; g < RtsFlags.GcFlags.generations; g++) {
        StgTSO *tso = generations[g].threads;
        while (tso != END_TSO_QUEUE) {
            cb(user, tso);
            tso = tso->global_link;
        }
    }
}

struct list_roots_ctx {
    ListRootsCb cb;
    void *user;
};

// This is an evac_fn.
static void list_roots_helper(void *user, StgClosure **p) {
    struct list_roots_ctx *ctx = (struct list_roots_ctx *) user;
    ctx->cb(ctx->user, *p);
}

void rts_listMiscRoots (ListRootsCb cb, void *user)
{
    assert_isPausedOnMyTask("rts_listMiscRoots");

    struct list_roots_ctx ctx;
    ctx.cb = cb;
    ctx.user = user;

    threadStableNameTable(&list_roots_helper, (void *)&ctx);
    threadStablePtrTable(&list_roots_helper, (void *)&ctx);
}
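
/* Example (illustrative): an external tool pausing the RTS from its own OS
 * thread (one that does not currently own a capability) to inspect the thread
 * list.  `count_tso` is a hypothetical callback:
 *
 *     static void count_tso (void *user, StgTSO *tso STG_UNUSED)
 *     {
 *         (*(int *)user)++;
 *     }
 *
 *     int n = 0;
 *     PauseToken *token = rts_pause();
 *     rts_listThreads(&count_tso, &n);
 *     rts_resume(token);
 */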

#else

PauseToken STG_NORETURN
*rts_pause (void)
{
    errorBelch("Warning: Pausing the RTS is only possible for "
               "multithreaded RTS.");
    stg_exit(EXIT_FAILURE);
}

void STG_NORETURN
rts_resume (PauseToken *pauseToken STG_UNUSED)
{
    errorBelch("Warning: Resuming the RTS is only possible for "
               "multithreaded RTS.");
    stg_exit(EXIT_FAILURE);
}

bool rts_isPaused(void)
{
    errorBelch("Warning: Pausing/Resuming the RTS is only possible for "
               "multithreaded RTS.");
    return false;
}

void rts_listThreads(ListThreadsCb cb STG_UNUSED, void *user STG_UNUSED)
{
    errorBelch("Warning: rts_listThreads is only possible for multithreaded RTS.");
}

void rts_listMiscRoots (ListRootsCb cb STG_UNUSED, void *user STG_UNUSED)
{
    errorBelch("Warning: rts_listMiscRoots is only possible for multithreaded RTS.");
}

#endif

/* -----------------------------------------------------------------------------
   tryPutMVar from outside Haskell

   The C call

      hs_try_putmvar(cap, mvar)

   is equivalent to the Haskell call

      tryPutMVar mvar ()

   but it is

     * non-blocking: takes a bounded, short, amount of time

     * asynchronous: the actual putMVar may be performed after the
       call returns.  That's why hs_try_putmvar() doesn't return a
       result to say whether the put succeeded.

   NOTE: this call transfers ownership of the StablePtr to the RTS, which will
   free it after the tryPutMVar has taken place.  The reason is that otherwise,
   it would be very difficult for the caller to arrange to free the StablePtr
   in all circumstances.

   For more details, see the section "Waking up Haskell threads from C" in the
   User's Guide.
   -------------------------------------------------------------------------- */
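
/* Example (illustrative, following the User's Guide): the Haskell side makes
 * the MVar and a StablePtr for it with newStablePtrPrimMVar, passes both the
 * StablePtr and its capability number (from threadCapability) to C, and then
 * blocks in takeMVar.  The C side signals completion like this:
 *
 *     void notify_done (HsStablePtr mvar, HsInt cap)
 *     {
 *         hs_try_putmvar((int)cap, mvar);   // also frees the StablePtr
 *     }
 */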

void hs_try_putmvar (/* in */ int capability,
                     /* in */ HsStablePtr mvar)
{
    Task *task = getMyTask();
    Capability *cap;
    Capability *task_old_cap USED_IF_THREADS;

    if (capability < 0) {
        capability = task->preferred_capability;
        if (capability < 0) {
            capability = 0;
        }
    }
    cap = getCapability(capability % enabled_capabilities);

#if !defined(THREADED_RTS)

    performTryPutMVar(cap, (StgMVar *)deRefStablePtr(mvar), Unit_closure);
    freeStablePtr(mvar);

#else

    ACQUIRE_LOCK(&cap->lock);
    // If the capability is free, we can perform the tryPutMVar immediately
    if (cap->running_task == NULL) {
        cap->running_task = task;
        task_old_cap = task->cap;
        task->cap = cap;
        RELEASE_LOCK(&cap->lock);

        performTryPutMVar(cap, (StgMVar *)deRefStablePtr(mvar), Unit_closure);

        freeStablePtr(mvar);

        // Wake up the capability, which will start running the thread that we
        // just awoke (if there was one).
        releaseCapability(cap);
        task->cap = task_old_cap;
    } else {
        PutMVar *p = stgMallocBytes(sizeof(PutMVar), "hs_try_putmvar");
        // We cannot deref the StablePtr if we don't have a capability,
        // so we have to store it and deref it later.
        p->mvar = mvar;
        p->link = cap->putMVars;
        cap->putMVars = p;
        RELEASE_LOCK(&cap->lock);
    }

#endif
}