// Fixed some C/C++ compiler errors due to stricter checks.
// [rubinius.git] machine/thread_state.hpp
// blob ae7cd7ff5c927753e2aba3f8a2c611cc2c536eba
1 #ifndef RBX_STATE_HPP
2 #define RBX_STATE_HPP
4 #include "missing/time.h"
6 #include "diagnostics.hpp"
7 #include "globals.hpp"
8 #include "type_info.hpp"
9 #include "unwind_info.hpp"
11 #include "memory/root.hpp"
12 #include "memory/root_buffer.hpp"
13 #include "memory/thca.hpp"
14 #include "memory/variable_buffer.hpp"
16 #include "sodium/randombytes.h"
18 #include <atomic>
19 #include <chrono>
20 #include <condition_variable>
21 #include <functional>
22 #include <mutex>
23 #include <regex>
24 #include <setjmp.h>
25 #include <stdint.h>
26 #include <string.h>
27 #include <string>
28 #include <thread>
29 #include <vector>
31 namespace rubinius {
32 struct CallFrame;
34 class Fiber;
35 class NativeMethodEnvironment;
36 class VariableScope;
37 class C_API;
38 class Channel;
39 class Class;
40 class Console;
41 class Environment;
42 class Exception;
43 class Object;
44 class Machine;
45 class MachineState;
46 class Memory;
47 class Park;
48 class Profiler;
49 class Signals;
50 class String;
51 class Symbol;
52 class Threads;
53 class UnwindState;
55 namespace memory {
56 class Collector;
// Why the most recent method dispatch failed; eNone means the last
// dispatch succeeded.
enum MethodMissingReason {
  eNone,        // no pending method_missing
  ePrivate,     // method exists but is private
  eProtected,   // method exists but is protected
  eSuper,       // failed lookup via super
  eVCall,       // failed variable-style call (bare identifier)
  eNormal       // plain failed lookup
};
// Result of the most recent constant lookup; vFound means it succeeded.
enum ConstantMissingReason {
  vFound,        // constant was found
  vPrivate,      // constant exists but is private
  vNonExistent   // constant does not exist
};
74 class ThreadState {
75 public:
76 const static uint64_t cLockLimit = 5000000000;
78 enum Kind {
79 eThread,
80 eFiber,
81 eSystem
84 enum Phase {
85 eManaged = 0x01,
86 eUnmanaged = 0x81,
87 eWaiting = 0x82,
90 const static int cYieldingPhase = 0x80;
92 enum ThreadStatus {
93 eNoStatus = 0,
94 eRun,
95 eSleep,
96 eDead,
97 eException
100 // TODO: Thread
101 enum FiberTransition {
102 eSuspending,
103 eSuspended,
104 eResuming,
105 eRunning,
106 eCanceled,
107 eFinished
110 private:
111 jmp_buf thread_unwind_;
112 bool thread_unwinding_;
113 memory::Roots roots_;
114 std::string name_;
115 memory::VariableRootBuffers variable_root_buffers_;
116 memory::RootBuffers root_buffers_;
117 Kind kind_;
118 diagnostics::MachineMetrics* metrics_;
120 protected:
121 pthread_t os_thread_;
122 uint32_t id_;
124 friend class ThreadState;
126 private:
127 static const int cWaitLimit = 100;
129 Machine* _machine_;
131 UnwindInfoSet unwinds_;
133 CallFrame* call_frame_;
134 memory::THCA* thca_;
136 int8_t* stack_start_;
137 int8_t* stack_barrier_start_;
138 int8_t* stack_barrier_end_;
140 size_t stack_size_;
141 size_t stack_cushion_;
142 ssize_t stack_probe_;
144 bool interrupt_with_signal_;
145 bool interrupt_by_kill_;
147 std::atomic<bool> should_wakeup_;
148 std::atomic<ThreadStatus> thread_status_;
150 std::mutex lock_;
151 std::mutex thread_lock_;
152 std::mutex sleep_lock_;
153 std::condition_variable sleep_cond_;
154 std::mutex join_lock_;
155 std::condition_variable join_cond_;
156 std::mutex fiber_mutex_;
158 std::mutex fiber_wait_mutex_;
159 std::condition_variable fiber_wait_condition_;
161 std::atomic<FiberTransition> fiber_transition_flag_;
163 MethodMissingReason method_missing_reason_;
164 ConstantMissingReason constant_missing_reason_;
166 bool main_thread_;
168 std::atomic<Phase> thread_phase_;
170 uint64_t sample_interval_;
171 uint64_t sample_counter_;
173 bool checkpoint_;
175 diagnostics::metric checkpoints_;
176 diagnostics::metric stops_;
178 public:
179 /* Data members */
180 Channel* waiting_channel_;
181 Exception* interrupted_exception_;
182 /// The Thread object for this VM state
183 Thread* thread_;
184 Fiber* fiber_;
186 /// Object that waits for inflation
187 Object* waiting_object_;
189 uint64_t start_time_;
191 NativeMethodEnvironment* native_method_environment;
193 void (*custom_wakeup_)(void*);
194 void* custom_wakeup_data_;
196 UnwindState* unwind_state_;
198 public:
199 ThreadState(uint32_t id, Machine* m, const char* name = nullptr);
200 ~ThreadState();
202 MachineState* const machine_state();
204 Machine* const machine() {
205 return _machine_;
208 jmp_buf& get_thread_unwind() {
209 return thread_unwind_;
212 bool thread_unwinding_p() {
213 return thread_unwinding_;
216 Configuration* const configuration();
217 Environment* const environment();
218 Threads* const threads();
219 Diagnostics* const diagnostics();
220 memory::Collector* const collector();
221 Signals* const signals();
222 Memory* const memory();
223 C_API* const c_api();
224 Profiler* const profiler();
225 Console* const console();
227 // -*-*-*
228 static ThreadState* current();
230 memory::Roots& roots() {
231 return roots_;
234 memory::VariableRootBuffers& variable_root_buffers() {
235 return variable_root_buffers_;
238 memory::RootBuffers& root_buffers() {
239 return root_buffers_;
242 const char* kind_name() const {
243 switch(kind_) {
244 case eThread:
245 return "Thread";
246 case eFiber:
247 return "Fiber";
248 case eSystem:
249 return "MachineThread";
252 /* GCC cannot determine that the above switch covers the enum and hence
253 * every exit from this function is covered.
255 return "unknown kind";
258 Kind kind() const {
259 return kind_;
262 void set_kind(Kind kind) {
263 kind_ = kind;
266 std::string name() const {
267 return name_;
270 void set_name(STATE, const char* name);
272 pthread_t& os_thread() {
273 return os_thread_;
276 diagnostics::MachineMetrics* metrics() {
277 return metrics_;
280 UnwindInfoSet& unwinds() {
281 return unwinds_;
284 uint32_t thread_id() const {
285 return id_;
288 const char* phase_name();
290 Phase thread_phase() {
291 return thread_phase_.load(std::memory_order_acquire);
294 void set_thread_phase(Phase thread_phase) {
295 thread_phase_.store(thread_phase, std::memory_order_release);
298 bool wakeup_p() {
299 return should_wakeup_;
302 void unset_wakeup() {
303 should_wakeup_ = false;
306 void set_wakeup() {
307 should_wakeup_ = true;
310 std::mutex& thread_lock() {
311 return thread_lock_;
314 std::mutex& sleep_lock() {
315 return sleep_lock_;
318 std::condition_variable& sleep_cond() {
319 return sleep_cond_;
322 std::mutex& join_lock() {
323 return join_lock_;
326 std::condition_variable& join_cond() {
327 return join_cond_;
330 std::mutex& fiber_mutex() {
331 return fiber_mutex_;
334 std::mutex& fiber_wait_mutex() {
335 return fiber_wait_mutex_;
338 std::condition_variable& fiber_wait_condition() {
339 return fiber_wait_condition_;
342 FiberTransition fiber_transition_flag() {
343 return fiber_transition_flag_;
346 bool suspending_p() const {
347 return fiber_transition_flag_ == eSuspending;
350 bool suspended_p() const {
351 return fiber_transition_flag_ == eSuspended;
354 bool resuming_p() const {
355 return fiber_transition_flag_ == eResuming;
358 bool running_p() const {
359 return fiber_transition_flag_ == eRunning;
362 bool canceled_p() const {
363 return fiber_transition_flag_ == eCanceled;
366 bool finished_p() const {
367 return fiber_transition_flag_ == eFinished;
370 void set_suspending() {
371 fiber_transition_flag_ = eSuspending;
374 void set_suspended() {
375 fiber_transition_flag_ = eSuspended;
378 void set_resuming() {
379 fiber_transition_flag_ = eResuming;
382 void set_running() {
383 fiber_transition_flag_ = eRunning;
386 void set_canceled() {
387 fiber_transition_flag_ = eCanceled;
390 void set_finished() {
391 fiber_transition_flag_ = eFinished;
394 void set_thread(Thread* thread);
395 void set_fiber(Fiber* fiber);
397 Thread* thread() {
398 return thread_;
401 Fiber* fiber() {
402 return fiber_;
405 ThreadStatus thread_status() {
406 return thread_status_;
409 void set_thread_run() {
410 thread_status_ = eRun;
413 void set_thread_sleep() {
414 thread_status_ = eSleep;
417 void set_thread_dead() {
418 thread_status_ = eDead;
421 void set_thread_exception() {
422 thread_status_ = eException;
425 bool sleeping_p() {
426 return thread_status_ == eSleep;
429 bool zombie_p() {
430 return thread_status_ == eDead || thread_status_ == eException;
433 void set_main_thread() {
434 main_thread_ = true;
437 bool main_thread_p() {
438 return main_thread_;
441 Object* allocate_object(STATE, intptr_t bytes, object_type type) {
442 return thca_->allocate(state, bytes, type);
446 bool limited_wait_for(std::function<bool ()> f) {
447 bool status = false;
449 // TODO: randomize wait interval
450 for(int i = 0; i < cWaitLimit && !(status = f()); i++) {
451 std::this_thread::sleep_for(std::chrono::microseconds(10));
454 return status;
457 void set_start_time();
458 double run_time();
460 void raise_stack_error(STATE);
462 void validate_stack_size(STATE, size_t size);
464 size_t stack_size() {
465 return stack_size_;
468 void set_stack_bounds(size_t size) {
469 int8_t stack_address;
471 stack_size_ = size - stack_cushion_;
472 stack_start_ = &stack_address;
474 // Determine the direction of the stack
475 set_stack_barrier();
478 void set_stack_barrier() {
479 int8_t barrier;
481 if(stack_start_ - &barrier < 0) {
482 // barrier = reinterpret_cast<int8_t*>(stack_start_ + stack_size_ - 2 * stack_cushion_);
483 stack_probe_ = stack_cushion_ / 2;
484 stack_barrier_start_ = stack_start_ + stack_size_ - 2 * stack_cushion_;
485 stack_barrier_end_ = stack_barrier_start_ + stack_cushion_;
486 } else {
487 // barrier = reinterpret_cast<void*>(ss - stack_size_ + stack_cushion_);
488 stack_probe_ = -(stack_cushion_ / 2);
489 stack_barrier_end_ = stack_start_ - stack_size_ + stack_cushion_;
490 stack_barrier_start_ = stack_barrier_end_ - stack_cushion_;
494 bool stack_limit_p(void* address) {
495 int8_t* probe = reinterpret_cast<int8_t*>(address) + stack_probe_;
497 return probe > stack_barrier_start_ && probe <= stack_barrier_end_;
500 bool check_stack(STATE, void* address) {
501 if(stack_limit_p(address)) {
502 raise_stack_error(state);
503 return false;
506 return true;
509 void set_previous_frame(CallFrame* frame);
511 bool push_call_frame(STATE, CallFrame* frame) {
512 if(!check_stack(state, frame)) return false;
514 set_previous_frame(frame);
515 call_frame_ = frame;
517 return true;
520 bool pop_call_frame(STATE, CallFrame* frame) {
521 call_frame_ = frame;
523 return !thread_interrupted_p();
526 bool thread_interrupted_p() {
527 return check_thread_raise_or_kill(this);
530 bool check_thread_raise_or_kill(STATE);
532 // Do NOT de-duplicate
533 void set_call_frame(CallFrame* frame) {
534 call_frame_ = frame;
537 CallFrame* call_frame() {
538 return call_frame_;
541 CallFrame* get_call_frame(ssize_t up=0);
542 CallFrame* get_ruby_frame(ssize_t up=0);
543 CallFrame* get_variables_frame(ssize_t up=0);
544 CallFrame* get_scope_frame(ssize_t up=0);
545 CallFrame* get_noncore_frame(STATE);
546 CallFrame* get_filtered_frame(STATE, const std::regex& filter);
548 bool scope_valid_p(VariableScope* scope);
550 Globals& globals();
552 MethodMissingReason method_missing_reason() const {
553 return method_missing_reason_;
556 void set_method_missing_reason(MethodMissingReason reason) {
557 method_missing_reason_ = reason;
560 ConstantMissingReason constant_missing_reason() const {
561 return constant_missing_reason_;
564 void set_constant_missing_reason(ConstantMissingReason reason) {
565 constant_missing_reason_ = reason;
568 void after_fork_child();
570 bool interrupt_by_kill() const {
571 return interrupt_by_kill_;
574 void clear_interrupt_by_kill() {
575 interrupt_by_kill_ = false;
578 void set_interrupt_by_kill() {
579 interrupt_by_kill_ = true;
582 Exception* interrupted_exception() const {
583 return interrupted_exception_;
586 void clear_interrupted_exception();
588 memory::VariableRootBuffers& current_root_buffers();
590 public:
591 void discard();
593 void bootstrap_class(STATE);
594 void bootstrap_ontology(STATE);
595 void bootstrap_symbol(STATE);
597 void sample(STATE);
599 #define RBX_PROFILE_MAX_SHIFT 0xf
600 #define RBX_PROFILE_MAX_INTERVAL 0x1fff
602 void set_sample_interval() {
603 sample_interval_ = randombytes_random();
604 sample_interval_ >>= (sample_interval_ & RBX_PROFILE_MAX_SHIFT);
605 sample_interval_ &= RBX_PROFILE_MAX_INTERVAL;
606 sample_counter_ = 0;
609 void set_checkpoint() {
610 checkpoint_ = true;
613 void checkpoint(STATE) {
614 if(!checkpoint_) return;
616 ++checkpoints_;
618 if(check_stop()) {
619 ++stops_;
620 checkpoint_ = false;
623 // TODO: profiler
624 if(sample_counter_++ >= sample_interval_) {
625 sample(state);
626 set_sample_interval();
630 #define SET_THREAD_UNWIND(ts) /*ep.file = __FILE__; ep.line = __LINE__; */ _setjmp(ts->get_thread_unwind())
632 void halt_thread();
634 void managed_phase();
635 void unmanaged_phase();
637 void set_current_thread();
639 void setup_errno(STATE, int num, const char* name, Class* sce, Module* ern);
640 void bootstrap_exceptions(STATE);
641 void initialize_fundamental_constants(STATE);
642 void initialize_builtin_classes(STATE);
643 void initialize_platform_data(STATE);
644 Object* ruby_lib_version();
646 static void init_ffi(STATE);
648 void raise_from_errno(const char* reason);
649 void raise_exception(Exception* exc);
650 Exception* new_exception(Class* cls, const char* msg);
651 Object* current_block();
653 Object* path2class(STATE, const char* name);
655 void wait_on_channel(Channel* channel);
656 void wait_on_custom_function(STATE, void (*func)(void*), void* data);
657 void clear_waiter();
659 void sleep(Object* duration);
660 bool wakeup();
662 void interrupt_with_signal() {
663 interrupt_with_signal_ = true;
666 void register_raise(STATE, Exception* exc);
667 void register_kill(STATE);
669 void visit_objects(STATE, std::function<void (STATE, Object**)> f);
670 void trace_objects(STATE, std::function<void (STATE, Object**)> f);
672 // -*-*-*
674 Symbol* const symbol(const char* str);
675 Symbol* const symbol(const char* str, size_t len);
676 Symbol* const symbol(std::string str);
677 Symbol* const symbol(String* str);
679 const uint32_t hash_seed();
681 UnwindState* unwind_state();
683 void raise_stack_error();
685 // TODO ThreadNexus
686 bool stop_p();
687 bool set_stop();
688 void unset_stop();
689 bool halt_p();
690 void set_halt();
692 bool try_managed_phase();
693 void waiting_phase();
695 void set_managed();
697 bool valid_thread_p(unsigned int thread_id);
699 #ifdef RBX_GC_STACK_CHECK
700 void check_stack(STATE);
701 #endif
703 bool yielding_p();
705 void yield();
707 uint64_t wait();
708 void wait_for_all();
710 bool lock_owned_p();
712 bool try_lock();
713 bool try_lock_wait();
715 bool check_stop() {
716 if(!can_stop_p()) return false;
718 while(stop_p()) {
719 yield();
721 return true;
724 return false;
727 bool can_stop_p();
729 void stop() {
730 while(set_stop()) {
731 if(try_lock_wait()) {
732 wait_for_all();
734 return;
739 void halt() {
740 set_halt();
741 stop();
742 unset_stop();
743 unlock();
746 void lock(std::function<void ()> process) {
747 lock();
748 process();
749 unlock();
752 void lock() {
753 try_lock_wait();
756 bool try_lock(std::function<void ()> process) {
757 if(try_lock()) {
758 process();
759 unlock();
760 return true;
761 } else {
762 return false;
766 void unlock();
768 void detect_deadlock(uint64_t nanoseconds);
769 void detect_deadlock(uint64_t nanoseconds, ThreadState* thread_state);
771 // TODO ThreadNexus end
775 #endif