/*
 * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2015, Rene Gollent, rene@gollent.com.
 * Distributed under the terms of the MIT License.
 */


#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <core_dump.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksyscalls.h>
#include <thread_types.h>
#include <user_debugger.h>
#include <vm/vm_types.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>

#include "BreakpointManager.h"


//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


// TODO: Since the introduction of team_debug_info::debugger_changed_condition
// there's some potential for simplifications. E.g. clear_team_debug_info() and
// destroy_team_debug_info() are now only used in nub_thread_cleanup() (plus
// arch_clear_team_debug_info() in install_team_debugger_init_debug_infos()).


static port_id sDefaultDebuggerPort = -1;
	// accessed atomically

static timer sProfilingTimers[SMP_MAX_CPUS];
	// a profiling timer for each CPU -- used when a profiled thread is running


static void schedule_profiling_timer(Thread* thread, bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed();
static void get_team_debug_info(team_debug_info& teamDebugInfo);


static inline status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
	size_t bufferSize)
{
	return write_port_etc(port, code, buffer, bufferSize, B_KILL_CAN_INTERRUPT,
		0);
}

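
/*!	Writes a message to the given debugger port, serializing concurrent
	writers via the team's debugger write lock. The message is silently
	dropped (with \c B_OK returned) if the debugger has changed or a handover
	is pending by the time the lock has been acquired.
*/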
static status_t
debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
	bool dontWait)
{
	TRACE(("debugger_write(): thread: %" B_PRId32 ", team %" B_PRId32 ", "
		"port: %" B_PRId32 ", code: %" B_PRIx32 ", message: %p, size: %lu, "
		"dontWait: %d\n", thread_get_current_thread()->id,
		thread_get_current_thread()->team->id, port, code, buffer, bufferSize,
		dontWait));

	status_t error = B_OK;

	// get the team debug info
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	sem_id writeLock = teamDebugInfo.debugger_write_lock;

	// get the write lock
	TRACE(("debugger_write(): acquiring write lock...\n"));
	error = acquire_sem_etc(writeLock, 1,
		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK) {
		TRACE(("debugger_write() done1: %" B_PRIx32 "\n", error));
		return error;
	}

	// re-get the team debug info
	get_team_debug_info(teamDebugInfo);

	if (teamDebugInfo.debugger_port != port
		|| (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_HANDOVER)) {
		// The debugger has changed in the meantime or we are about to be
		// handed over to a new debugger. In either case we don't send the
		// message.
		TRACE(("debugger_write(): %s\n",
			(teamDebugInfo.debugger_port != port ? "debugger port changed"
				: "handover flag set")));
	} else {
		TRACE(("debugger_write(): writing to port...\n"));

		error = write_port_etc(port, code, buffer, bufferSize,
			dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT,
			0);
	}

	// release the write lock
	release_sem(writeLock);

	TRACE(("debugger_write() done: %" B_PRIx32 "\n", error));

	return error;
}


/*!	Updates the thread::flags field according to what user debugger flags are
	set on the thread.
	Interrupts must be disabled and the thread's debug info lock must be held.
*/
static void
update_thread_user_debug_flag(Thread* thread)
{
	if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) != 0)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}


/*!	Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
	given thread.
	Interrupts must be disabled and the thread debug info lock must be held.
*/
static void
update_thread_breakpoints_flag(Thread* thread)
{
	Team* team = thread->team;

	if (arch_has_breakpoints(&team->debug_info.arch_info))
		atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}


/*!	Updates the Thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
	threads of the current team.
*/
static void
update_threads_breakpoints_flag()
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	Thread* thread = team->thread_list;

	if (arch_has_breakpoints(&team->debug_info.arch_info)) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}


/*!	Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the
	given thread, which must be the current thread.
*/
static void
update_thread_debugger_installed_flag(Thread* thread)
{
	Team* team = thread->team;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	else
		atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}


/*!	Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
	threads of the given team.
	The team's lock must be held.
*/
static void
update_threads_debugger_installed_flag(Team* team)
{
	Thread* thread = team->thread_list;

	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		for (; thread != NULL; thread = thread->team_next)
			atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
	} else {
		for (; thread != NULL; thread = thread->team_next)
			atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
	}
}


/**
 *	For the first initialization the function must be called with \a initLock
 *	set to \c true. If another thread could access the structure at the same
 *	time, `lock' must be held when calling the function.
 */
static void
clear_team_debug_info(struct team_debug_info *info, bool initLock)
{
	arch_clear_team_debug_info(&info->arch_info);
	atomic_set(&info->flags, B_TEAM_DEBUG_DEFAULT_FLAGS);
	info->debugger_team = -1;
	info->debugger_port = -1;
	info->nub_thread = -1;
	info->debugger_write_lock = -1;
	info->causing_thread = -1;
	info->image_event = 0;
	info->breakpoint_manager = NULL;

	if (initLock) {
		B_INITIALIZE_SPINLOCK(&info->lock);
		info->debugger_changed_condition = NULL;
	}
}


/**
 *	`lock' must not be held nor may interrupts be disabled.
 *	\a info must not be a member of a team struct (or the team struct must no
 *	longer be accessible, i.e. the team should already be removed).
 *
 *	In case the team is still accessible, the procedure is:
 *	2. copy the team debug info on stack
 *	3. call clear_team_debug_info() on the team debug info
 *	5. call destroy_team_debug_info() on the copied team debug info
 */
static void
destroy_team_debug_info(struct team_debug_info *info)
{
	arch_destroy_team_debug_info(&info->arch_info);

	// delete the breakpoint manager
	delete info->breakpoint_manager;
	info->breakpoint_manager = NULL;

	// delete the debugger port write lock
	if (info->debugger_write_lock >= 0) {
		delete_sem(info->debugger_write_lock);
		info->debugger_write_lock = -1;
	}

	// delete the nub port
	if (info->nub_port >= 0) {
		set_port_owner(info->nub_port, B_CURRENT_TEAM);
		delete_port(info->nub_port);
	}

	// wait for the nub thread
	if (info->nub_thread >= 0) {
		if (info->nub_thread != thread_get_current_thread()->id) {
			status_t result;
			wait_for_thread(info->nub_thread, &result);
		}

		info->nub_thread = -1;
	}

	atomic_set(&info->flags, 0);
	info->debugger_team = -1;
	info->debugger_port = -1;
	info->causing_thread = -1;
	info->image_event = -1;
}

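
/*!	Initializes the given thread_debug_info structure to its default
	(not being debugged) state.
*/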
static void
init_thread_debug_info(struct thread_debug_info *info)
{
	B_INITIALIZE_SPINLOCK(&info->lock);
	arch_clear_thread_debug_info(&info->arch_info);
	info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
	info->debug_port = -1;
	info->ignore_signals = 0;
	info->ignore_signals_once = 0;
	info->profile.sample_area = -1;
	info->profile.samples = NULL;
	info->profile.buffer_full = false;
	info->profile.installed_timer = NULL;
}


/*!	Clears the debug info for the current thread.
	Invoked with thread debug info lock being held.
*/
static void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
	// cancel profiling timer
	if (info->profile.installed_timer != NULL) {
		cancel_timer(info->profile.installed_timer);
		info->profile.installed_timer = NULL;
	}

	arch_clear_thread_debug_info(&info->arch_info);
	atomic_set(&info->flags,
		B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
	info->debug_port = -1;
	info->ignore_signals = 0;
	info->ignore_signals_once = 0;
	info->profile.sample_area = -1;
	info->profile.samples = NULL;
	info->profile.buffer_full = false;
}

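
/*!	Frees all resources still referenced by the given thread_debug_info:
	the profiling sample area and the thread's debug port.
*/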
static void
destroy_thread_debug_info(struct thread_debug_info *info)
{
	area_id sampleArea = info->profile.sample_area;
	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}

	arch_destroy_thread_debug_info(&info->arch_info);

	if (info->debug_port >= 0) {
		delete_port(info->debug_port);
		info->debug_port = -1;
	}

	info->ignore_signals = 0;
	info->ignore_signals_once = 0;

	atomic_set(&info->flags, 0);
}

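
/*!	Waits until no other debugger change is in progress for the given team and
	then marks the team as being in a debugger change, by setting the given
	condition variable as team_debug_info::debugger_changed_condition.
*/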
static status_t
prepare_debugger_change(team_id teamID, ConditionVariable& condition,
	Team*& team)
{
	// We look up the team by ID, even in case of the current team, so we can
	// be sure that the team is not already dying.
	if (teamID == B_CURRENT_TEAM)
		teamID = thread_get_current_thread()->team->id;

	while (true) {
		// get the team
		team = Team::GetAndLock(teamID);
		if (team == NULL)
			return B_BAD_TEAM_ID;
		BReference<Team> teamReference(team, true);
		TeamLocker teamLocker(team, true);

		// don't allow messing with the kernel team
		if (team == team_get_kernel_team())
			return B_NOT_ALLOWED;

		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return B_OK;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();
		teamLocker.Unlock();

		entry.Wait();
	}
}


static void
prepare_debugger_change(Team* team, ConditionVariable& condition)
{
	while (true) {
		// check whether the condition is already set
		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition == NULL) {
			// nobody there yet -- set our condition variable and be done
			team->debug_info.debugger_changed_condition = &condition;
			return;
		}

		// we'll have to wait
		ConditionVariableEntry entry;
		team->debug_info.debugger_changed_condition->Add(&entry);

		debugInfoLocker.Unlock();

		entry.Wait();
	}
}


static void
finish_debugger_change(Team* team)
{
	// unset our condition variable and notify all threads waiting on it
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	ConditionVariable* condition = team->debug_info.debugger_changed_condition;
	team->debug_info.debugger_changed_condition = NULL;

	condition->NotifyAll();
}

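
/*!	Called by exec_team() before it deletes all ports owned by the team: if
	the current thread has a debug port, its ownership is temporarily moved
	to the kernel team so that it survives the exec;
	user_debug_finish_after_exec() moves it back.
*/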
void
user_debug_prepare_for_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized, change the ownership of the debug port for the thread
	// to the kernel team, since exec_team() deletes all ports owned by this
	// team. We change the ownership back later.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if ((thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team_get_kernel_team_id());
	}
}


void
user_debug_finish_after_exec()
{
	Thread *thread = thread_get_current_thread();
	Team *team = thread->team;

	// If a debugger is installed for the team and the thread debug stuff
	// initialized for this thread, change the ownership of its debug port
	// back to this team.
	if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		port_id debugPort = -1;

		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED)
			debugPort = thread->debug_info.debug_port;

		threadDebugInfoLocker.Unlock();

		// set the new port ownership
		if (debugPort >= 0)
			set_port_owner(debugPort, team->id);
	}
}


#ifdef ARCH_INIT_USER_DEBUG
	ARCH_INIT_USER_DEBUG();
#endif


static void
get_team_debug_info(team_debug_info &teamDebugInfo)
{
	Thread *thread = thread_get_current_thread();

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);

	memcpy(&teamDebugInfo, &thread->team->debug_info, sizeof(team_debug_info));

	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);
}

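
/*!	Makes the current thread stop at a debug event: creates the thread's debug
	port if necessary, sends the given event message to the debugger, and then
	processes commands arriving on the debug port until the thread is told to
	continue or the debugger goes away.
*/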
static status_t
thread_hit_debug_event_internal(debug_debugger_message event,
	const void *message, int32 size, bool requireDebugger, bool &restart)
{
	Thread *thread = thread_get_current_thread();

	TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", event: %" B_PRIu32
		", message: %p, size: %" B_PRId32 "\n", thread->id, (uint32)event,
		message, size));

	// check, if there's a debug port already
	bool setPort = !(atomic_get(&thread->debug_info.flags)
		& B_THREAD_DEBUG_INITIALIZED);

	// create a port, if there is none yet
	port_id port = -1;
	if (setPort) {
		char nameBuffer[128];
		snprintf(nameBuffer, sizeof(nameBuffer), "nub to thread %" B_PRId32,
			thread->id);

		port = create_port(1, nameBuffer);
		if (port < 0) {
			dprintf("thread_hit_debug_event(): Failed to create debug port: "
				"%s\n", strerror(port));
			return port;
		}
	}

	// check the debug info structures once more: get the debugger port, set
	// the thread's debug port, and update the thread's debug flags
	port_id deletePort = port;
	port_id debuggerPort = -1;
	port_id nubPort = -1;
	status_t error = B_OK;
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	uint32 threadFlags = thread->debug_info.flags;
	threadFlags &= ~B_THREAD_DEBUG_STOP;
	bool debuggerInstalled
		= (thread->team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	if (thread->id == thread->team->debug_info.nub_thread) {
		// Ugh, we're the nub thread. We shouldn't be here.
		TRACE(("thread_hit_debug_event(): Misdirected nub thread: %" B_PRId32
			"\n", thread->id));
		error = B_ERROR;
	} else if (debuggerInstalled || !requireDebugger) {
		if (debuggerInstalled) {
			debuggerPort = thread->team->debug_info.debugger_port;
			nubPort = thread->team->debug_info.nub_port;
		}

		if (setPort) {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				// someone created a port for us (the port we've created will
				// be deleted below)
				port = thread->debug_info.debug_port;
			} else {
				thread->debug_info.debug_port = port;
				deletePort = -1;	// keep the port
				threadFlags |= B_THREAD_DEBUG_INITIALIZED;
			}
		} else {
			if (threadFlags & B_THREAD_DEBUG_INITIALIZED) {
				port = thread->debug_info.debug_port;
			} else {
				// someone deleted our port
				error = B_ERROR;
			}
		}
	} else
		error = B_ERROR;

	threadFlags |= B_THREAD_DEBUG_STOPPED;
	atomic_set(&thread->debug_info.flags, threadFlags);

	update_thread_user_debug_flag(thread);

	threadDebugInfoLocker.Unlock();
	RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
	restore_interrupts(state);

	// delete the superfluous port
	if (deletePort >= 0)
		delete_port(deletePort);

	if (error != B_OK) {
		TRACE(("thread_hit_debug_event() error: thread: %" B_PRId32 ", error: "
			"%" B_PRIx32 "\n", thread->id, error));
		return error;
	}

	// send a message to the debugger port
	if (debuggerInstalled) {
		// update the message's origin info first
		debug_origin *origin = (debug_origin *)message;
		origin->thread = thread->id;
		origin->team = thread->team->id;
		origin->nub_port = nubPort;

		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", sending "
			"message to debugger port %" B_PRId32 "\n", thread->id,
			debuggerPort));

		error = debugger_write(debuggerPort, event, message, size, false);
	}

	status_t result = B_THREAD_DEBUG_HANDLE_EVENT;
	bool singleStep = false;

	if (error == B_OK) {
		bool done = false;
		while (!done) {
			// read a command from the debug port
			int32 command;
			debugged_thread_message_data commandMessage;
			ssize_t commandMessageSize = read_port_etc(port, &command,
				&commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
				0);

			if (commandMessageSize < 0) {
				error = commandMessageSize;
				TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed "
					"to receive message from port %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, port, error));
				break;
			}

			switch (command) {
				case B_DEBUGGED_THREAD_MESSAGE_CONTINUE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_MESSAGE_CONTINUE\n",
						thread->id));
					result = commandMessage.continue_thread.handle_event;

					singleStep = commandMessage.continue_thread.single_step;
					done = true;
					break;

				case B_DEBUGGED_THREAD_SET_CPU_STATE:
					TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ": "
						"B_DEBUGGED_THREAD_SET_CPU_STATE\n",
						thread->id));
					arch_set_debug_cpu_state(
						&commandMessage.set_cpu_state.cpu_state);
					break;

				case B_DEBUGGED_THREAD_GET_CPU_STATE:
				{
					port_id replyPort = commandMessage.get_cpu_state.reply_port;

					// prepare the message
					debug_nub_get_cpu_state_reply replyMessage;
					replyMessage.error = B_OK;
					replyMessage.message = event;
					arch_get_debug_cpu_state(&replyMessage.cpu_state);

					// send it
					error = kill_interruptable_write_port(replyPort, event,
						&replyMessage, sizeof(replyMessage));
					break;
				}

				case B_DEBUGGED_THREAD_DEBUGGER_CHANGED:
				{
					// Check, if the debugger really changed, i.e. is different
					// than the one we know.
					team_debug_info teamDebugInfo;
					get_team_debug_info(teamDebugInfo);

					if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
						if (!debuggerInstalled
							|| teamDebugInfo.debugger_port != debuggerPort) {
							// debugger was installed or has changed: restart
							restart = true;
							done = true;
						}
					} else {
						if (debuggerInstalled) {
							// debugger is gone: continue the thread normally
							done = true;
						}
					}

					break;
				}
			}
		}
	} else {
		TRACE(("thread_hit_debug_event(): thread: %" B_PRId32 ", failed to send "
			"message to debugger port %" B_PRId32 ": %" B_PRIx32 "\n",
			thread->id, debuggerPort, error));
	}

	// update the thread debug info
	bool destroyThreadInfo = false;
	thread_debug_info threadDebugInfo;

	state = disable_interrupts();
	threadDebugInfoLocker.Lock();

	// check, if the team is still being debugged
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// update the single-step flag
		if (singleStep) {
			atomic_or(&thread->debug_info.flags,
				B_THREAD_DEBUG_SINGLE_STEP);
			atomic_or(&thread->flags, THREAD_FLAGS_SINGLE_STEP);
		} else {
			atomic_and(&thread->debug_info.flags,
				~(int32)B_THREAD_DEBUG_SINGLE_STEP);
		}

		// unset the "stopped" state
		atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);

		update_thread_user_debug_flag(thread);
	} else {
		// the debugger is gone: cleanup our info completely
		threadDebugInfo = thread->debug_info;
		clear_thread_debug_info(&thread->debug_info, false);
		destroyThreadInfo = true;
	}

	threadDebugInfoLocker.Unlock();
	restore_interrupts(state);

	// enable/disable single stepping
	arch_update_thread_single_step();

	if (destroyThreadInfo)
		destroy_thread_debug_info(&threadDebugInfo);

	return (error == B_OK ? result : error);
}


static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
	int32 size, bool requireDebugger)
{
	status_t result;
	do {
		bool restart = false;
		result = thread_hit_debug_event_internal(event, message, size,
			requireDebugger, restart);
	} while (result >= 0 && restart);

	// Prepare to continue -- we install a debugger change condition, so no one
	// will change the debugger while we're playing with the breakpoint manager.
	// TODO: Maybe better use ref-counting and a flag in the breakpoint manager.
	Team* team = thread_get_current_thread()->team;
	ConditionVariable debugChangeCondition;
	prepare_debugger_change(team, debugChangeCondition);

	if (team->debug_info.breakpoint_manager != NULL) {
		bool isSyscall;
		void* pc = arch_debug_get_interrupt_pc(&isSyscall);
		if (pc != NULL && !isSyscall)
			team->debug_info.breakpoint_manager->PrepareToContinue(pc);
	}

	finish_debugger_change(team);

	return result;
}


static status_t
thread_hit_serious_debug_event(debug_debugger_message event,
	const void *message, int32 messageSize)
{
	// ensure that a debugger is installed for this team
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		Thread *thread = thread_get_current_thread();
		dprintf("thread_hit_serious_debug_event(): Failed to install debugger: "
			"thread: %" B_PRId32 ": %s\n", thread->id, strerror(error));
		return error;
	}

	// enter the debug loop
	return thread_hit_debug_event(event, message, messageSize, true);
}

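
/*!	Called on syscall entry; notifies the debugger, if pre-syscall tracing is
	enabled for the team or the calling thread.
*/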
void
user_debug_pre_syscall(uint32 syscall, void *args)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether pre-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_PRE_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_PRE_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_pre_syscall message;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_PRE_SYSCALL, &message,
		sizeof(message), true);
}


void
user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime)
{
	// check whether a debugger is installed
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED))
		return;

	// check whether post-syscall tracing is enabled for team or thread
	int32 threadDebugFlags = atomic_get(&thread->debug_info.flags);
	if (!(teamDebugFlags & B_TEAM_DEBUG_POST_SYSCALL)
		&& !(threadDebugFlags & B_THREAD_DEBUG_POST_SYSCALL)) {
		return;
	}

	// prepare the message
	debug_post_syscall message;
	message.start_time = startTime;
	message.end_time = system_time();
	message.return_value = returnValue;
	message.syscall = syscall;

	// copy the syscall args
	if (syscall < (uint32)kSyscallCount) {
		if (kSyscallInfos[syscall].parameter_size > 0)
			memcpy(message.args, args, kSyscallInfos[syscall].parameter_size);
	}

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_POST_SYSCALL, &message,
		sizeof(message), true);
}


/**	\brief To be called when an unhandled processor exception (error/fault)
 *		occurred.
 *	\param exception The debug_why_stopped value identifying the kind of fault.
 *	\param signal The signal corresponding to the exception.
 *	\return \c true, if the caller shall continue normally, i.e. usually send
 *		a deadly signal. \c false, if the debugger insists on continuing the
 *		program (e.g. because it has removed the cause of the problem).
 */
bool
user_debug_exception_occurred(debug_exception_type exception, int signal)
{
	// First check whether there's a signal handler installed for the signal.
	// If so, we don't want to install a debugger for the team. We always send
	// the signal instead. An already installed debugger will be notified, if
	// it has requested notifications of signals.
	struct sigaction signalAction;
	if (sigaction(signal, NULL, &signalAction) == 0
		&& signalAction.sa_handler != SIG_DFL) {
		return true;
	}

	// prepare the message
	debug_exception_occurred message;
	message.exception = exception;
	message.signal = signal;

	status_t result = thread_hit_serious_debug_event(
		B_DEBUGGER_MESSAGE_EXCEPTION_OCCURRED, &message, sizeof(message));
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


bool
user_debug_handle_signal(int signal, struct sigaction *handler, bool deadly)
{
	// check, if a debugger is installed and is interested in signals
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS)) {
		return true;
	}

	// prepare the message
	debug_signal_received message;
	message.signal = signal;
	message.handler = *handler;
	message.deadly = deadly;

	status_t result = thread_hit_debug_event(B_DEBUGGER_MESSAGE_SIGNAL_RECEIVED,
		&message, sizeof(message), true);
	return (result != B_THREAD_DEBUG_IGNORE_EVENT);
}


void
user_debug_stop_thread()
{
	// check whether this is actually an emulated single-step notification
	Thread* thread = thread_get_current_thread();
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	bool singleStepped = false;
	if ((atomic_and(&thread->debug_info.flags,
				~B_THREAD_DEBUG_NOTIFY_SINGLE_STEP)
			& B_THREAD_DEBUG_NOTIFY_SINGLE_STEP) != 0) {
		singleStepped = true;
	}

	threadDebugInfoLocker.Unlock();

	if (singleStepped) {
		user_debug_single_stepped();
	} else {
		debug_thread_debugged message;
		thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED,
			&message, sizeof(message));
	}
}

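
/*!	Notifies the debugger that the current team has created a new team, if it
	is interested in team creation events.
*/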
void
user_debug_team_created(team_id teamID)
{
	// check, if a debugger is installed and is interested in team creation
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_created message;
	message.new_team = teamID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_team_deleted(team_id teamID, port_id debuggerPort)
{
	if (debuggerPort >= 0) {
		TRACE(("user_debug_team_deleted(team: %" B_PRId32 ", debugger port: "
			"%" B_PRId32 ")\n", teamID, debuggerPort));

		debug_team_deleted message;
		message.origin.thread = -1;
		message.origin.team = teamID;
		message.origin.nub_port = -1;
		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_TEAM_DELETED, &message,
			sizeof(message), B_RELATIVE_TIMEOUT, 0);
	}
}


void
user_debug_team_exec()
{
	// check, if a debugger is installed and is interested in team creation
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_TEAM_CREATION)) {
		return;
	}

	// prepare the message
	debug_team_exec message;
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
		sizeof(message), true);
}


/*!	Called by a new userland thread to update the debugging related flags of
	\c Thread::flags before the thread first enters userland.
	\param thread The calling thread.
*/
void
user_debug_update_new_thread_flags(Thread* thread)
{
	// lock it and update its flags
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	update_thread_user_debug_flag(thread);
	update_thread_breakpoints_flag(thread);
	update_thread_debugger_installed_flag(thread);
}


void
user_debug_thread_created(thread_id threadID)
{
	// check, if a debugger is installed and is interested in thread events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// prepare the message
	debug_thread_created message;
	message.new_thread = threadID;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_CREATED, &message,
		sizeof(message), true);
}

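
/*!	Notifies the given team's debugger that the given thread has been deleted.
	Since the dying thread no longer belongs to the debugged team (but to the
	kernel), the message is written to the debugger port directly, serialized
	via the team's debugger write lock, instead of via debugger_write().
*/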
void
user_debug_thread_deleted(team_id teamID, thread_id threadID)
{
	// Things are a bit complicated here, since this thread no longer belongs to
	// the debugged team (but to the kernel). So we can't use debugger_write().

	// get the team debug flags and debugger port
	Team* team = Team::Get(teamID);
	if (team == NULL)
		return;
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;
	sem_id writeLock = team->debug_info.debugger_write_lock;

	debugInfoLocker.Unlock();

	// check, if a debugger is installed and is interested in thread events
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_THREADS)) {
		return;
	}

	// acquire the debugger write lock
	status_t error = acquire_sem_etc(writeLock, 1, B_KILL_CAN_INTERRUPT, 0);
	if (error != B_OK)
		return;

	// re-get the team debug info -- we need to check whether anything changed
	debugInfoLocker.Lock();

	teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id newDebuggerPort = team->debug_info.debugger_port;

	debugInfoLocker.Unlock();

	// Send the message only if the debugger hasn't changed in the meantime or
	// the team is about to be handed over.
	if (newDebuggerPort == debuggerPort
		|| (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) == 0) {
		debug_thread_deleted message;
		message.origin.thread = threadID;
		message.origin.team = teamID;
		message.origin.nub_port = -1;

		write_port_etc(debuggerPort, B_DEBUGGER_MESSAGE_THREAD_DELETED,
			&message, sizeof(message), B_KILL_CAN_INTERRUPT, 0);
	}

	// release the debugger write lock
	release_sem(writeLock);
}


/*!	Called for a thread that is about to die, cleaning up all user debug
	facilities installed for the thread.
	\param thread The current thread, the one that is going to die.
*/
void
user_debug_thread_exiting(Thread* thread)
{
	// thread is the current thread, so using team is safe
	Team* team = thread->team;

	InterruptsLocker interruptsLocker;

	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
	port_id debuggerPort = team->debug_info.debugger_port;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	// check, if a debugger is installed
	if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
		|| debuggerPort < 0) {
		return;
	}

	// detach the profile info and mark the thread dying
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	thread_debug_info& threadDebugInfo = thread->debug_info;
	if (threadDebugInfo.profile.samples == NULL)
		return;

	area_id sampleArea = threadDebugInfo.profile.sample_area;
	int32 sampleCount = threadDebugInfo.profile.sample_count;
	int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
	int32 stackDepth = threadDebugInfo.profile.stack_depth;
	bool variableStackDepth = threadDebugInfo.profile.variable_stack_depth;
	int32 imageEvent = threadDebugInfo.profile.image_event;
	threadDebugInfo.profile.sample_area = -1;
	threadDebugInfo.profile.samples = NULL;
	threadDebugInfo.profile.buffer_full = false;

	atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

	threadDebugInfoLocker.Unlock();
	interruptsLocker.Unlock();

	// notify the debugger
	debug_profiler_update message;
	message.origin.thread = thread->id;
	message.origin.team = thread->team->id;
	message.origin.nub_port = -1;	// asynchronous message
	message.sample_count = sampleCount;
	message.dropped_ticks = droppedTicks;
	message.stack_depth = stackDepth;
	message.variable_stack_depth = variableStackDepth;
	message.image_event = imageEvent;
	message.stopped = true;
	debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
		&message, sizeof(message), false);

	if (sampleArea >= 0) {
		area_info areaInfo;
		if (get_area_info(sampleArea, &areaInfo) == B_OK) {
			unlock_memory(areaInfo.address, areaInfo.size, B_READ_DEVICE);
			delete_area(sampleArea);
		}
	}
}


void
user_debug_image_created(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_created message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
		sizeof(message), true);
}


void
user_debug_image_deleted(const image_info *imageInfo)
{
	// check, if a debugger is installed and is interested in image events
	Thread *thread = thread_get_current_thread();
	int32 teamDebugFlags = atomic_get(&thread->team->debug_info.flags);
	if (~teamDebugFlags
		& (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_IMAGES)) {
		return;
	}

	// prepare the message
	debug_image_deleted message;
	memcpy(&message.info, imageInfo, sizeof(image_info));
	message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
		+ 1;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
		sizeof(message), true);
}


void
user_debug_breakpoint_hit(bool software)
{
	// prepare the message
	debug_breakpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_BREAKPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_watchpoint_hit()
{
	// prepare the message
	debug_watchpoint_hit message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_WATCHPOINT_HIT, &message,
		sizeof(message));
}


void
user_debug_single_stepped()
{
	// clear the single-step thread flag
	Thread* thread = thread_get_current_thread();
	atomic_and(&thread->flags, ~(int32)THREAD_FLAGS_SINGLE_STEP);

	// prepare the message
	debug_single_step message;
	arch_get_debug_cpu_state(&message.cpu_state);

	thread_hit_serious_debug_event(B_DEBUGGER_MESSAGE_SINGLE_STEP, &message,
		sizeof(message));
}


/*!	Schedules the profiling timer for the current thread.
	The caller must hold the thread's debug info lock.
	\param thread The current thread.
	\param interval The time after which the timer should fire.
*/
static void
schedule_profiling_timer(Thread* thread, bigtime_t interval)
{
	struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
	thread->debug_info.profile.installed_timer = timer;
	thread->debug_info.profile.timer_end = system_time() + interval;
	add_timer(timer, &profiling_event, interval, B_ONE_SHOT_RELATIVE_TIMER);
}


/*!	Samples the current thread's instruction pointer/stack trace.
	The caller must hold the current thread's debug info lock.
	\param flushBuffer Return parameter: Set to \c true when the sampling
		buffer must be flushed.
*/
static bool
profiling_do_sample(bool& flushBuffer)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	if (debugInfo.profile.samples == NULL)
		return false;

	// Check, whether the buffer is full or an image event occurred since the
	// last sample was taken.
	int32 maxSamples = debugInfo.profile.max_samples;
	int32 sampleCount = debugInfo.profile.sample_count;
	int32 stackDepth = debugInfo.profile.stack_depth;
	int32 imageEvent = thread->team->debug_info.image_event;
	if (debugInfo.profile.sample_count > 0) {
		if (debugInfo.profile.last_image_event < imageEvent
			&& debugInfo.profile.variable_stack_depth
			&& sampleCount + 2 <= maxSamples) {
			// an image event occurred, but we use variable stack depth and
			// have enough room in the buffer to indicate an image event
			addr_t* event = debugInfo.profile.samples + sampleCount;
			event[0] = B_DEBUG_PROFILE_IMAGE_EVENT;
			event[1] = imageEvent;

			sampleCount += 2;
			debugInfo.profile.sample_count = sampleCount;
			debugInfo.profile.last_image_event = imageEvent;
		}

		if (debugInfo.profile.last_image_event < imageEvent
			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc(NULL))) {
				flushBuffer = true;
				return true;
			}

			// We can't flush the buffer now, since we interrupted a kernel
			// function. If the buffer is not full yet, we add the samples,
			// otherwise we have to drop them.
			if (maxSamples - sampleCount < stackDepth) {
				debugInfo.profile.dropped_ticks++;
				return true;
			}
		}
	} else {
		// first sample -- set the image event
		debugInfo.profile.image_event = imageEvent;
		debugInfo.profile.last_image_event = imageEvent;
	}

	// get the samples
	addr_t* returnAddresses = debugInfo.profile.samples
		+ debugInfo.profile.sample_count;
	if (debugInfo.profile.variable_stack_depth) {
		// variable sample count per hit
		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

		debugInfo.profile.sample_count += *returnAddresses + 1;
	} else {
		// fixed sample count per hit
		if (stackDepth > 1) {
			int32 count = arch_debug_get_stack_trace(returnAddresses,
				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

			for (int32 i = count; i < stackDepth; i++)
				returnAddresses[i] = 0;
		} else
			*returnAddresses = (addr_t)arch_debug_get_interrupt_pc(NULL);

		debugInfo.profile.sample_count += stackDepth;
	}

	return true;
}

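
/*!	Post-interrupt callback installed by profiling_event() when the sample
	buffer needs to be flushed: sends a B_DEBUGGER_MESSAGE_PROFILER_UPDATE to
	the debugger and, if the thread is still being profiled, resumes sampling.
*/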
static void
profiling_buffer_full(void*)
{
	// It is undefined whether the function is called with interrupts enabled
	// or disabled. We are allowed to enable interrupts, though. First make
	// sure interrupts are disabled.
	disable_interrupts();

	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
		int32 sampleCount = debugInfo.profile.sample_count;
		int32 droppedTicks = debugInfo.profile.dropped_ticks;
		int32 stackDepth = debugInfo.profile.stack_depth;
		bool variableStackDepth = debugInfo.profile.variable_stack_depth;
		int32 imageEvent = debugInfo.profile.image_event;

		// notify the debugger
		debugInfo.profile.sample_count = 0;
		debugInfo.profile.dropped_ticks = 0;

		threadDebugInfoLocker.Unlock();
		enable_interrupts();

		// prepare the message
		debug_profiler_update message;
		message.sample_count = sampleCount;
		message.dropped_ticks = droppedTicks;
		message.stack_depth = stackDepth;
		message.variable_stack_depth = variableStackDepth;
		message.image_event = imageEvent;
		message.stopped = false;

		thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
			sizeof(message), false);

		disable_interrupts();
		threadDebugInfoLocker.Lock();

		// do the sampling and reschedule timer, if still profiling this thread
		bool flushBuffer;
		if (profiling_do_sample(flushBuffer)) {
			debugInfo.profile.buffer_full = false;
			schedule_profiling_timer(thread, debugInfo.profile.interval);
		}
	}

	threadDebugInfoLocker.Unlock();
	enable_interrupts();
}


/*!	Profiling timer event callback.
	Called with interrupts disabled.
*/
static int32
profiling_event(timer* /*unused*/)
{
	Thread* thread = thread_get_current_thread();
	thread_debug_info& debugInfo = thread->debug_info;

	SpinLocker threadDebugInfoLocker(debugInfo.lock);

	bool flushBuffer = false;
	if (profiling_do_sample(flushBuffer)) {
		if (flushBuffer) {
			// The sample buffer needs to be flushed; we'll have to notify the
			// debugger. We can't do that right here. Instead we set a post
			// interrupt callback doing that for us, and don't reschedule the
			// timer this time.
			thread->post_interrupt_callback = profiling_buffer_full;
			debugInfo.profile.installed_timer = NULL;
			debugInfo.profile.buffer_full = true;
		} else
			schedule_profiling_timer(thread, debugInfo.profile.interval);
	} else
		debugInfo.profile.installed_timer = NULL;

	return B_HANDLED_INTERRUPT;
}


/*!	Called by the scheduler when a debugged thread has been unscheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_unscheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// if running, cancel the profiling timer
	struct timer* timer = thread->debug_info.profile.installed_timer;
	if (timer != NULL) {
		// track remaining time
		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
		thread->debug_info.profile.interval_left = max_c(left, 0);
		thread->debug_info.profile.installed_timer = NULL;

		threadDebugInfoLocker.Unlock();
			// not necessary, but doesn't harm and reduces contention
		cancel_timer(timer);
			// since invoked on the same CPU, this will not possibly wait for
			// an already called timer hook
	}
}


/*!	Called by the scheduler when a debugged thread has been scheduled.
	The scheduler lock is being held.
*/
void
user_debug_thread_scheduled(Thread* thread)
{
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->debug_info.profile.samples != NULL
		&& !thread->debug_info.profile.buffer_full) {
		// install profiling timer
		schedule_profiling_timer(thread,
			thread->debug_info.profile.interval_left);
	}
}


/*!	\brief Called by the debug nub thread of a team to broadcast a message to
		all threads of the team that are initialized for debugging (and
		thus have a debug port).
*/
static void
broadcast_debugged_thread_message(Thread *nubThread, int32 code,
	const void *message, int32 size)
{
	// iterate through the threads
	thread_info threadInfo;
	int32 cookie = 0;
	while (get_next_thread_info(nubThread->team->id, &cookie, &threadInfo)
			== B_OK) {
		// get the thread and lock it
		Thread* thread = Thread::GetAndLock(threadInfo.thread);
		if (thread == NULL)
			continue;

		BReference<Thread> threadReference(thread, true);
		ThreadLocker threadLocker(thread, true);

		// get the thread's debug port
		InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		port_id threadDebugPort = -1;
		if (thread && thread != nubThread && thread->team == nubThread->team
			&& (thread->debug_info.flags & B_THREAD_DEBUG_INITIALIZED) != 0
			&& (thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) != 0) {
			threadDebugPort = thread->debug_info.debug_port;
		}

		threadDebugInfoLocker.Unlock();
		threadLocker.Unlock();

		// send the message to the thread
		if (threadDebugPort >= 0) {
			status_t error = kill_interruptable_write_port(threadDebugPort,
				code, message, size);
			if (error != B_OK) {
				TRACE(("broadcast_debugged_thread_message(): Failed to send "
					"message to thread %" B_PRId32 ": %" B_PRIx32 "\n",
					thread->id, error));
			}
		}
	}
}

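
/*!	Called by the nub thread when its port became invalid or it was
	interrupted: clears the team's debug info (if this thread is still
	registered as the team's nub thread), removes all breakpoints, and tells
	the debugged threads that the debugger is gone.
*/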
static void
nub_thread_cleanup(Thread *nubThread)
{
	TRACE(("nub_thread_cleanup(%" B_PRId32 "): debugger port: %" B_PRId32 "\n",
		nubThread->id, nubThread->team->debug_info.debugger_port));

	ConditionVariable debugChangeCondition;
	prepare_debugger_change(nubThread->team, debugChangeCondition);

	team_debug_info teamDebugInfo;
	bool destroyDebugInfo = false;

	TeamLocker teamLocker(nubThread->team);
		// required by update_threads_debugger_installed_flag()

	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);

	team_debug_info &info = nubThread->team->debug_info;
	if (info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED
		&& info.nub_thread == nubThread->id) {
		teamDebugInfo = info;
		clear_team_debug_info(&info, false);
		destroyDebugInfo = true;
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(nubThread->team);

	RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
	restore_interrupts(state);

	teamLocker.Unlock();

	if (destroyDebugInfo)
		teamDebugInfo.breakpoint_manager->RemoveAllBreakpoints();

	finish_debugger_change(nubThread->team);

	if (destroyDebugInfo)
		destroy_team_debug_info(&teamDebugInfo);

	// notify all threads that the debugger is gone
	broadcast_debugged_thread_message(nubThread,
		B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);
}


/**	\brief Debug nub thread helper function that returns the debug port of
 *	a thread of the same team.
 */
static status_t
debug_nub_thread_get_thread_debug_port(Thread *nubThread,
	thread_id threadID, port_id &threadDebugPort)
{
	threadDebugPort = -1;

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// get the debug port
	InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	if (thread->team != nubThread->team)
		return B_BAD_VALUE;
	if ((thread->debug_info.flags & B_THREAD_DEBUG_STOPPED) == 0)
		return B_BAD_THREAD_STATE;

	threadDebugPort = thread->debug_info.debug_port;

	threadDebugInfoLocker.Unlock();

	if (threadDebugPort < 0)
		return B_ERROR;

	return B_OK;
}

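
/*!	Entry function of a team's debug nub thread: reads debugger requests from
	the team's nub port and processes them until the port goes away or the
	thread is interrupted by a kill signal.
*/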
1641 debug_nub_thread(void *)
1643 Thread
*nubThread
= thread_get_current_thread();
1645 // check, if we're still the current nub thread and get our port
1646 cpu_status state
= disable_interrupts();
1647 GRAB_TEAM_DEBUG_INFO_LOCK(nubThread
->team
->debug_info
);
1649 if (nubThread
->team
->debug_info
.nub_thread
!= nubThread
->id
) {
1650 RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread
->team
->debug_info
);
1651 restore_interrupts(state
);
1655 port_id port
= nubThread
->team
->debug_info
.nub_port
;
1656 sem_id writeLock
= nubThread
->team
->debug_info
.debugger_write_lock
;
1657 BreakpointManager
* breakpointManager
1658 = nubThread
->team
->debug_info
.breakpoint_manager
;
1660 RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread
->team
->debug_info
);
1661 restore_interrupts(state
);
1663 TRACE(("debug_nub_thread() thread: %" B_PRId32
", team %" B_PRId32
", nub "
1664 "port: %" B_PRId32
"\n", nubThread
->id
, nubThread
->team
->id
, port
));
1666 // notify all threads that a debugger has been installed
1667 broadcast_debugged_thread_message(nubThread
,
1668 B_DEBUGGED_THREAD_DEBUGGER_CHANGED
, NULL
, 0);
1670 // command processing loop
1673 debug_nub_message_data message
;
1674 ssize_t messageSize
= read_port_etc(port
, &command
, &message
,
1675 sizeof(message
), B_KILL_CAN_INTERRUPT
, 0);
1677 if (messageSize
< 0) {
1678 // The port is no longer valid or we were interrupted by a kill
1679 // signal: If we are still listed in the team's debug info as nub
1680 // thread, we need to update that.
1681 nub_thread_cleanup(nubThread
);
1683 TRACE(("nub thread %" B_PRId32
": terminating: %lx\n",
1684 nubThread
->id
, messageSize
));
1689 bool sendReply
= false;
1691 debug_nub_read_memory_reply read_memory
;
1692 debug_nub_write_memory_reply write_memory
;
1693 debug_nub_get_cpu_state_reply get_cpu_state
;
1694 debug_nub_set_breakpoint_reply set_breakpoint
;
1695 debug_nub_set_watchpoint_reply set_watchpoint
;
1696 debug_nub_get_signal_masks_reply get_signal_masks
;
1697 debug_nub_get_signal_handler_reply get_signal_handler
;
1698 debug_nub_start_profiler_reply start_profiler
;
1699 debug_profiler_update profiler_update
;
1700 debug_nub_write_core_file_reply write_core_file
;
1702 int32 replySize
= 0;
1703 port_id replyPort
= -1;
1705 // process the command
1707 case B_DEBUG_MESSAGE_READ_MEMORY
:
1709 // get the parameters
1710 replyPort
= message
.read_memory
.reply_port
;
1711 void *address
= message
.read_memory
.address
;
1712 int32 size
= message
.read_memory
.size
;
1713 status_t result
= B_OK
;
1715 // check the parameters
1716 if (!BreakpointManager::CanAccessAddress(address
, false))
1717 result
= B_BAD_ADDRESS
;
1718 else if (size
<= 0 || size
> B_MAX_READ_WRITE_MEMORY_SIZE
)
1719 result
= B_BAD_VALUE
;
1722 size_t bytesRead
= 0;
1723 if (result
== B_OK
) {
1724 result
= breakpointManager
->ReadMemory(address
,
1725 reply
.read_memory
.data
, size
, bytesRead
);
1727 reply
.read_memory
.error
= result
;
1729 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_READ_MEMORY: "
1730 "reply port: %" B_PRId32
", address: %p, size: %" B_PRId32
1731 ", result: %" B_PRIx32
", read: %ld\n", nubThread
->id
,
1732 replyPort
, address
, size
, result
, bytesRead
));
1734 // send only as much data as necessary
1735 reply
.read_memory
.size
= bytesRead
;
1736 replySize
= reply
.read_memory
.data
+ bytesRead
- (char*)&reply
;
1741 case B_DEBUG_MESSAGE_WRITE_MEMORY
:
1743 // get the parameters
1744 replyPort
= message
.write_memory
.reply_port
;
1745 void *address
= message
.write_memory
.address
;
1746 int32 size
= message
.write_memory
.size
;
1747 const char *data
= message
.write_memory
.data
;
1748 int32 realSize
= (char*)&message
+ messageSize
- data
;
1749 status_t result
= B_OK
;
1751 // check the parameters
1752 if (!BreakpointManager::CanAccessAddress(address
, true))
1753 result
= B_BAD_ADDRESS
;
1754 else if (size
<= 0 || size
> realSize
)
1755 result
= B_BAD_VALUE
;
1758 size_t bytesWritten
= 0;
1759 if (result
== B_OK
) {
1760 result
= breakpointManager
->WriteMemory(address
, data
, size
,
1763 reply
.write_memory
.error
= result
;
1765 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_WRITE_MEMORY: "
1766 "reply port: %" B_PRId32
", address: %p, size: %" B_PRId32
1767 ", result: %" B_PRIx32
", written: %ld\n", nubThread
->id
,
1768 replyPort
, address
, size
, result
, bytesWritten
));
1770 reply
.write_memory
.size
= bytesWritten
;
1772 replySize
= sizeof(debug_nub_write_memory_reply
);
1776 case B_DEBUG_MESSAGE_SET_TEAM_FLAGS
:
1778 // get the parameters
1779 int32 flags
= message
.set_team_flags
.flags
1780 & B_TEAM_DEBUG_USER_FLAG_MASK
;
1782 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_SET_TEAM_FLAGS"
1783 ": flags: %" B_PRIx32
"\n", nubThread
->id
, flags
));
1785 Team
*team
= thread_get_current_thread()->team
;
1788 cpu_status state
= disable_interrupts();
1789 GRAB_TEAM_DEBUG_INFO_LOCK(team
->debug_info
);
1791 flags
|= team
->debug_info
.flags
& B_TEAM_DEBUG_KERNEL_FLAG_MASK
;
1792 atomic_set(&team
->debug_info
.flags
, flags
);
1794 RELEASE_TEAM_DEBUG_INFO_LOCK(team
->debug_info
);
1795 restore_interrupts(state
);
1800 case B_DEBUG_MESSAGE_SET_THREAD_FLAGS
:
1802 // get the parameters
1803 thread_id threadID
= message
.set_thread_flags
.thread
;
1804 int32 flags
= message
.set_thread_flags
.flags
1805 & B_THREAD_DEBUG_USER_FLAG_MASK
;
1807 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_SET_THREAD_FLAGS"
1808 ": thread: %" B_PRId32
", flags: %" B_PRIx32
"\n",
1809 nubThread
->id
, threadID
, flags
));
1812 Thread
* thread
= Thread::GetAndLock(threadID
);
1815 BReference
<Thread
> threadReference(thread
, true);
1816 ThreadLocker
threadLocker(thread
, true);
1818 InterruptsSpinLocker
threadDebugInfoLocker(
1819 thread
->debug_info
.lock
);
1821 if (thread
->team
== thread_get_current_thread()->team
) {
1822 flags
|= thread
->debug_info
.flags
1823 & B_THREAD_DEBUG_KERNEL_FLAG_MASK
;
1824 atomic_set(&thread
->debug_info
.flags
, flags
);
1830 case B_DEBUG_MESSAGE_CONTINUE_THREAD
:
1832 // get the parameters
1837 threadID
= message
.continue_thread
.thread
;
1838 handleEvent
= message
.continue_thread
.handle_event
;
1839 singleStep
= message
.continue_thread
.single_step
;
1841 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_CONTINUE_THREAD"
1842 ": thread: %" B_PRId32
", handle event: %" B_PRIu32
", "
1843 "single step: %d\n", nubThread
->id
, threadID
, handleEvent
,
1846 // find the thread and get its debug port
1847 port_id threadDebugPort
= -1;
1848 status_t result
= debug_nub_thread_get_thread_debug_port(
1849 nubThread
, threadID
, threadDebugPort
);
1851 // send a message to the debugged thread
1852 if (result
== B_OK
) {
1853 debugged_thread_continue commandMessage
;
1854 commandMessage
.handle_event
= handleEvent
;
1855 commandMessage
.single_step
= singleStep
;
1857 result
= write_port(threadDebugPort
,
1858 B_DEBUGGED_THREAD_MESSAGE_CONTINUE
,
1859 &commandMessage
, sizeof(commandMessage
));
1860 } else if (result
== B_BAD_THREAD_STATE
) {
1861 Thread
* thread
= Thread::GetAndLock(threadID
);
1865 BReference
<Thread
> threadReference(thread
, true);
1866 ThreadLocker
threadLocker(thread
, true);
1867 if (thread
->state
== B_THREAD_SUSPENDED
) {
1868 threadLocker
.Unlock();
1869 resume_thread(threadID
);
1877 case B_DEBUG_MESSAGE_SET_CPU_STATE
:
1879 // get the parameters
1880 thread_id threadID
= message
.set_cpu_state
.thread
;
1881 const debug_cpu_state
&cpuState
1882 = message
.set_cpu_state
.cpu_state
;
1884 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_SET_CPU_STATE"
1885 ": thread: %" B_PRId32
"\n", nubThread
->id
, threadID
));
1887 // find the thread and get its debug port
1888 port_id threadDebugPort
= -1;
1889 status_t result
= debug_nub_thread_get_thread_debug_port(
1890 nubThread
, threadID
, threadDebugPort
);
1892 // send a message to the debugged thread
1893 if (result
== B_OK
) {
1894 debugged_thread_set_cpu_state commandMessage
;
1895 memcpy(&commandMessage
.cpu_state
, &cpuState
,
1896 sizeof(debug_cpu_state
));
1897 write_port(threadDebugPort
,
1898 B_DEBUGGED_THREAD_SET_CPU_STATE
,
1899 &commandMessage
, sizeof(commandMessage
));
1905 case B_DEBUG_MESSAGE_GET_CPU_STATE
:
1907 // get the parameters
1908 thread_id threadID
= message
.get_cpu_state
.thread
;
1909 replyPort
= message
.get_cpu_state
.reply_port
;
1911 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_GET_CPU_STATE"
1912 ": thread: %" B_PRId32
"\n", nubThread
->id
, threadID
));
1914 // find the thread and get its debug port
1915 port_id threadDebugPort
= -1;
1916 status_t result
= debug_nub_thread_get_thread_debug_port(
1917 nubThread
, threadID
, threadDebugPort
);
1919 // send a message to the debugged thread
1920 if (threadDebugPort
>= 0) {
1921 debugged_thread_get_cpu_state commandMessage
;
1922 commandMessage
.reply_port
= replyPort
;
1923 result
= write_port(threadDebugPort
,
1924 B_DEBUGGED_THREAD_GET_CPU_STATE
, &commandMessage
,
1925 sizeof(commandMessage
));
1928 // send a reply to the debugger in case of error
1929 if (result
!= B_OK
) {
1930 reply
.get_cpu_state
.error
= result
;
1932 replySize
= sizeof(reply
.get_cpu_state
);
1938 case B_DEBUG_MESSAGE_SET_BREAKPOINT
:
1940 // get the parameters
1941 replyPort
= message
.set_breakpoint
.reply_port
;
1942 void *address
= message
.set_breakpoint
.address
;
1944 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_SET_BREAKPOINT"
1945 ": address: %p\n", nubThread
->id
, address
));
1947 // check the address
1948 status_t result
= B_OK
;
1950 || !BreakpointManager::CanAccessAddress(address
, false)) {
1951 result
= B_BAD_ADDRESS
;
1954 // set the breakpoint
1956 result
= breakpointManager
->InstallBreakpoint(address
);
1959 update_threads_breakpoints_flag();
1961 // prepare the reply
1962 reply
.set_breakpoint
.error
= result
;
1963 replySize
= sizeof(reply
.set_breakpoint
);
1969 case B_DEBUG_MESSAGE_CLEAR_BREAKPOINT
:
1971 // get the parameters
1972 void *address
= message
.clear_breakpoint
.address
;
1974 TRACE(("nub thread %" B_PRId32
": B_DEBUG_MESSAGE_CLEAR_BREAKPOINT"
1975 ": address: %p\n", nubThread
->id
, address
));
1977 // check the address
1978 status_t result
= B_OK
;
1980 || !BreakpointManager::CanAccessAddress(address
, false)) {
1981 result
= B_BAD_ADDRESS
;
1984 // clear the breakpoint
1986 result
= breakpointManager
->UninstallBreakpoint(address
);
1989 update_threads_breakpoints_flag();
			case B_DEBUG_MESSAGE_SET_WATCHPOINT:
			{
				// get the parameters
				replyPort = message.set_watchpoint.reply_port;
				void *address = message.set_watchpoint.address;
				uint32 type = message.set_watchpoint.type;
				int32 length = message.set_watchpoint.length;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_WATCHPOINT"
					": address: %p, type: %" B_PRIu32 ", length: %" B_PRId32
					"\n", nubThread->id, address, type, length));

				// check the address and size
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				} else if (length < 0)
					result = B_BAD_VALUE;

				// set the watchpoint
				if (result == B_OK) {
					result = breakpointManager->InstallWatchpoint(address, type,
						length);
				}

				if (result == B_OK)
					update_threads_breakpoints_flag();

				// prepare the reply
				reply.set_watchpoint.error = result;
				replySize = sizeof(reply.set_watchpoint);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_CLEAR_WATCHPOINT:
			{
				// get the parameters
				void *address = message.clear_watchpoint.address;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_CLEAR_WATCHPOINT"
					": address: %p\n", nubThread->id, address));

				// check the address
				status_t result = B_OK;
				if (address == NULL
					|| !BreakpointManager::CanAccessAddress(address, false)) {
					result = B_BAD_ADDRESS;
				}

				// clear the watchpoint
				if (result == B_OK)
					result = breakpointManager->UninstallWatchpoint(address);

				if (result == B_OK)
					update_threads_breakpoints_flag();

				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_MASKS:
			{
				// get the parameters
				thread_id threadID = message.set_signal_masks.thread;
				uint64 ignore = message.set_signal_masks.ignore_mask;
				uint64 ignoreOnce = message.set_signal_masks.ignore_once_mask;
				uint32 ignoreOp = message.set_signal_masks.ignore_op;
				uint32 ignoreOnceOp = message.set_signal_masks.ignore_once_op;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_MASKS"
					": thread: %" B_PRId32 ", ignore: %" B_PRIx64 " (op: %"
					B_PRIu32 "), ignore once: %" B_PRIx64 " (op: %" B_PRIu32
					")\n", nubThread->id, threadID, ignore, ignoreOp,
					ignoreOnce, ignoreOnceOp));

				// set the masks
				Thread* thread = Thread::GetAndLock(threadID);
				if (thread != NULL) {
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					InterruptsSpinLocker threadDebugInfoLocker(
						thread->debug_info.lock);

					if (thread->team == thread_get_current_thread()->team) {
						thread_debug_info &threadDebugInfo = thread->debug_info;

						// set ignore mask
						switch (ignoreOp) {
							case B_DEBUG_SIGNAL_MASK_AND:
								threadDebugInfo.ignore_signals &= ignore;
								break;
							case B_DEBUG_SIGNAL_MASK_OR:
								threadDebugInfo.ignore_signals |= ignore;
								break;
							case B_DEBUG_SIGNAL_MASK_SET:
								threadDebugInfo.ignore_signals = ignore;
								break;
						}

						// set ignore once mask
						switch (ignoreOnceOp) {
							case B_DEBUG_SIGNAL_MASK_AND:
								threadDebugInfo.ignore_signals_once &= ignoreOnce;
								break;
							case B_DEBUG_SIGNAL_MASK_OR:
								threadDebugInfo.ignore_signals_once |= ignoreOnce;
								break;
							case B_DEBUG_SIGNAL_MASK_SET:
								threadDebugInfo.ignore_signals_once = ignoreOnce;
								break;
						}
					}
				}

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_MASKS:
			{
				// get the parameters
				replyPort = message.get_signal_masks.reply_port;
				thread_id threadID = message.get_signal_masks.thread;
				status_t result = B_OK;

				// get the masks
				uint64 ignore = 0;
				uint64 ignoreOnce = 0;

				Thread* thread = Thread::GetAndLock(threadID);
				if (thread != NULL) {
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					InterruptsSpinLocker threadDebugInfoLocker(
						thread->debug_info.lock);

					ignore = thread->debug_info.ignore_signals;
					ignoreOnce = thread->debug_info.ignore_signals_once;
				} else
					result = B_BAD_THREAD_ID;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_MASKS"
					": reply port: %" B_PRId32 ", thread: %" B_PRId32 ", "
					"ignore: %" B_PRIx64 ", ignore once: %" B_PRIx64 ", result: "
					"%" B_PRIx32 "\n", nubThread->id, replyPort, threadID,
					ignore, ignoreOnce, result));

				// prepare the message
				reply.get_signal_masks.error = result;
				reply.get_signal_masks.ignore_mask = ignore;
				reply.get_signal_masks.ignore_once_mask = ignoreOnce;
				replySize = sizeof(reply.get_signal_masks);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER:
			{
				// get the parameters
				int signal = message.set_signal_handler.signal;
				struct sigaction &handler = message.set_signal_handler.handler;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_SET_SIGNAL_HANDLER"
					": signal: %d, handler: %p\n", nubThread->id, signal,
					handler.sa_handler));

				sigaction(signal, &handler, NULL);

				break;
			}

			case B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER:
			{
				// get the parameters
				replyPort = message.get_signal_handler.reply_port;
				int signal = message.get_signal_handler.signal;
				status_t result = B_OK;

				// get the handler
				if (sigaction(signal, NULL, &reply.get_signal_handler.handler)
						!= 0) {
					result = errno;
				}

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_GET_SIGNAL_HANDLER"
					": reply port: %" B_PRId32 ", signal: %d, handler: %p\n",
					nubThread->id, replyPort, signal,
					reply.get_signal_handler.handler.sa_handler));

				// prepare the message
				reply.get_signal_handler.error = result;
				replySize = sizeof(reply.get_signal_handler);
				sendReply = true;

				break;
			}

			case B_DEBUG_MESSAGE_PREPARE_HANDOVER:
			{
				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_MESSAGE_PREPARE_HANDOVER"
					"\n", nubThread->id));

				Team *team = nubThread->team;

				// Acquire the debugger write lock. As soon as we have it and
				// have set the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag, no thread
				// will write anything to the debugger port anymore.
				status_t result = acquire_sem_etc(writeLock, 1,
					B_KILL_CAN_INTERRUPT, 0);
				if (result == B_OK) {
					// set the respective team debug flag
					cpu_status state = disable_interrupts();
					GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

					atomic_or(&team->debug_info.flags,
						B_TEAM_DEBUG_DEBUGGER_HANDOVER);
					BreakpointManager* breakpointManager
						= team->debug_info.breakpoint_manager;

					RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
					restore_interrupts(state);

					// remove all installed breakpoints
					breakpointManager->RemoveAllBreakpoints();

					release_sem(writeLock);
				} else {
					// We probably got a SIGKILL. If so, we will terminate when
					// reading the next message fails.
				}

				break;
			}

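			// Handover protocol: the old debugger sends PREPARE_HANDOVER to this
			// nub, which sets B_TEAM_DEBUG_DEBUGGER_HANDOVER so that no further
			// events are written to the old debugger port, and removes all
			// installed breakpoints. The new debugger then calls
			// install_team_debugger(), which detects the flag, reuses the existing
			// nub port and thread, and finally has the nub broadcast the change to
			// all debugged threads via the HANDED_OVER message below.
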
			case B_DEBUG_MESSAGE_HANDED_OVER:
			{
				// notify all threads that the debugger has changed
				broadcast_debugged_thread_message(nubThread,
					B_DEBUGGED_THREAD_DEBUGGER_CHANGED, NULL, 0);

				break;
			}

			case B_DEBUG_START_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.start_profiler.thread;
				replyPort = message.start_profiler.reply_port;
				area_id sampleArea = message.start_profiler.sample_area;
				int32 stackDepth = message.start_profiler.stack_depth;
				bool variableStackDepth
					= message.start_profiler.variable_stack_depth;
				bigtime_t interval = max_c(message.start_profiler.interval,
					B_DEBUG_MIN_PROFILE_INTERVAL);
				status_t result = B_OK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_START_PROFILER: "
					"thread: %" B_PRId32 ", sample area: %" B_PRId32 "\n",
					nubThread->id, threadID, sampleArea));

				if (stackDepth < 1)
					stackDepth = 1;
				else if (stackDepth > B_DEBUG_STACK_TRACE_DEPTH)
					stackDepth = B_DEBUG_STACK_TRACE_DEPTH;

				// provision for an extra entry per hit (for the number of
				// samples), if variable stack depth
				if (variableStackDepth)
					stackDepth++;

				// clone the sample area
				area_info areaInfo;
				if (result == B_OK)
					result = get_area_info(sampleArea, &areaInfo);

				area_id clonedSampleArea = -1;
				void* samples = NULL;
				if (result == B_OK) {
					clonedSampleArea = clone_area("profiling samples", &samples,
						B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA,
						sampleArea);
					if (clonedSampleArea >= 0) {
						// we need the memory locked
						result = lock_memory(samples, areaInfo.size,
							B_READ_DEVICE);
						if (result != B_OK) {
							delete_area(clonedSampleArea);
							clonedSampleArea = -1;
						}
					} else
						result = clonedSampleArea;
				}

				// get the thread and set the profile info
				int32 imageEvent = nubThread->team->debug_info.image_event;
				if (result == B_OK) {
					Thread* thread = Thread::GetAndLock(threadID);
					BReference<Thread> threadReference(thread, true);
					ThreadLocker threadLocker(thread, true);

					if (thread != NULL && thread->team == nubThread->team) {
						thread_debug_info &threadDebugInfo = thread->debug_info;

						InterruptsSpinLocker threadDebugInfoLocker(
							threadDebugInfo.lock);

						if (threadDebugInfo.profile.samples == NULL) {
							threadDebugInfo.profile.interval = interval;
							threadDebugInfo.profile.sample_area
								= clonedSampleArea;
							threadDebugInfo.profile.samples = (addr_t*)samples;
							threadDebugInfo.profile.max_samples
								= areaInfo.size / sizeof(addr_t);
							threadDebugInfo.profile.flush_threshold
								= threadDebugInfo.profile.max_samples
									* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
									/ 100;
							threadDebugInfo.profile.sample_count = 0;
							threadDebugInfo.profile.dropped_ticks = 0;
							threadDebugInfo.profile.stack_depth = stackDepth;
							threadDebugInfo.profile.variable_stack_depth
								= variableStackDepth;
							threadDebugInfo.profile.buffer_full = false;
							threadDebugInfo.profile.interval_left = interval;
							threadDebugInfo.profile.installed_timer = NULL;
							threadDebugInfo.profile.image_event = imageEvent;
							threadDebugInfo.profile.last_image_event
								= imageEvent;
						} else
							result = B_BAD_VALUE;
					} else
						result = B_BAD_THREAD_ID;
				}

				// on error unlock and delete the sample area
				if (result != B_OK) {
					if (clonedSampleArea >= 0) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(clonedSampleArea);
					}
				}

				// send a reply to the debugger
				reply.start_profiler.error = result;
				reply.start_profiler.interval = interval;
				reply.start_profiler.image_event = imageEvent;
				replySize = sizeof(reply.start_profiler);
				sendReply = true;

				break;
			}

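			// The sample area is cloned into the kernel and locked because the
			// profiling timer fills it from interrupt context while the profiled
			// thread is running. flush_threshold marks the fill level at which the
			// collected samples are flushed to the debugger (as a profiler update)
			// before the buffer overflows.
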
			case B_DEBUG_STOP_PROFILER:
			{
				// get the parameters
				thread_id threadID = message.stop_profiler.thread;
				replyPort = message.stop_profiler.reply_port;
				status_t result = B_OK;

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_STOP_PROFILER: "
					"thread: %" B_PRId32 "\n", nubThread->id, threadID));

				area_id sampleArea = -1;
				addr_t* samples = NULL;
				int32 sampleCount = 0;
				int32 stackDepth = 0;
				bool variableStackDepth = false;
				int32 imageEvent = 0;
				int32 droppedTicks = 0;

				// get the thread and detach the profile info
				Thread* thread = Thread::GetAndLock(threadID);
				BReference<Thread> threadReference(thread, true);
				ThreadLocker threadLocker(thread, true);

				if (thread && thread->team == nubThread->team) {
					thread_debug_info &threadDebugInfo = thread->debug_info;

					InterruptsSpinLocker threadDebugInfoLocker(
						threadDebugInfo.lock);

					if (threadDebugInfo.profile.samples != NULL) {
						sampleArea = threadDebugInfo.profile.sample_area;
						samples = threadDebugInfo.profile.samples;
						sampleCount = threadDebugInfo.profile.sample_count;
						droppedTicks = threadDebugInfo.profile.dropped_ticks;
						stackDepth = threadDebugInfo.profile.stack_depth;
						variableStackDepth
							= threadDebugInfo.profile.variable_stack_depth;
						imageEvent = threadDebugInfo.profile.image_event;
						threadDebugInfo.profile.sample_area = -1;
						threadDebugInfo.profile.samples = NULL;
						threadDebugInfo.profile.buffer_full = false;
						threadDebugInfo.profile.dropped_ticks = 0;
					} else
						result = B_BAD_VALUE;
				} else
					result = B_BAD_THREAD_ID;

				threadLocker.Unlock();

				// prepare the reply
				if (result == B_OK) {
					reply.profiler_update.origin.thread = threadID;
					reply.profiler_update.image_event = imageEvent;
					reply.profiler_update.stack_depth = stackDepth;
					reply.profiler_update.variable_stack_depth
						= variableStackDepth;
					reply.profiler_update.sample_count = sampleCount;
					reply.profiler_update.dropped_ticks = droppedTicks;
					reply.profiler_update.stopped = true;
				} else
					reply.profiler_update.origin.thread = result;

				replySize = sizeof(debug_profiler_update);
				sendReply = true;

				// unlock the memory and delete the sample area
				if (sampleArea >= 0) {
					area_info areaInfo;
					if (get_area_info(sampleArea, &areaInfo) == B_OK) {
						unlock_memory(samples, areaInfo.size, B_READ_DEVICE);
						delete_area(sampleArea);
					}
				}

				break;
			}

			case B_DEBUG_WRITE_CORE_FILE:
			{
				// get the parameters
				replyPort = message.write_core_file.reply_port;
				char* path = message.write_core_file.path;
				path[sizeof(message.write_core_file.path) - 1] = '\0';

				TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE"
					": path: %s\n", nubThread->id, path));

				// write the core file
				status_t result = core_dump_write_core_file(path, false);

				// prepare the reply
				reply.write_core_file.error = result;
				replySize = sizeof(reply.write_core_file);
				sendReply = true;

				break;
			}
		}

		// send the reply, if necessary
		if (sendReply) {
			status_t error = kill_interruptable_write_port(replyPort, command,
				&reply, replySize);

			if (error != B_OK) {
				// The debugger port either no longer exists or we got
				// interrupted by a kill signal. In either case we terminate.
				TRACE(("nub thread %" B_PRId32 ": failed to send reply to port "
					"%" B_PRId32 ": %s\n", nubThread->id, replyPort,
					strerror(error)));

				nub_thread_cleanup(nubThread);
				return error;
			}
		}
	}
}

/**	\brief Helper function for install_team_debugger() that sets up the team
	and thread debug infos.

	The caller must hold the team's lock as well as the team debug info lock.

	The function also clears the arch specific team and thread debug infos
	(including among other things formerly set break/watchpoints).
*/
static void
install_team_debugger_init_debug_infos(Team *team, team_id debuggerTeam,
	port_id debuggerPort, port_id nubPort, thread_id nubThread,
	sem_id debuggerPortWriteLock, thread_id causingThread)
{
	atomic_set(&team->debug_info.flags,
		B_TEAM_DEBUG_DEFAULT_FLAGS | B_TEAM_DEBUG_DEBUGGER_INSTALLED);
	team->debug_info.nub_port = nubPort;
	team->debug_info.nub_thread = nubThread;
	team->debug_info.debugger_team = debuggerTeam;
	team->debug_info.debugger_port = debuggerPort;
	team->debug_info.debugger_write_lock = debuggerPortWriteLock;
	team->debug_info.causing_thread = causingThread;

	arch_clear_team_debug_info(&team->debug_info.arch_info);

	// set the user debug flags and signal masks of all threads to the default
	for (Thread *thread = team->thread_list; thread;
			thread = thread->team_next) {
		SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

		if (thread->id == nubThread) {
			atomic_set(&thread->debug_info.flags, B_THREAD_DEBUG_NUB_THREAD);
		} else {
			int32 flags = thread->debug_info.flags
				& ~B_THREAD_DEBUG_USER_FLAG_MASK;
			atomic_set(&thread->debug_info.flags,
				flags | B_THREAD_DEBUG_DEFAULT_FLAGS);
			thread->debug_info.ignore_signals = 0;
			thread->debug_info.ignore_signals_once = 0;

			arch_clear_thread_debug_info(&thread->debug_info.arch_info);
		}
	}

	// update the thread::flags fields
	update_threads_debugger_installed_flag(team);
}

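// install_team_debugger() below covers both paths: installing a debugger on a
// team that doesn't have one yet (creating the write lock semaphore, nub port,
// breakpoint manager, and nub thread), and taking over from an existing
// debugger after the handover flag has been set via
// B_DEBUG_MESSAGE_PREPARE_HANDOVER, in which case the existing nub
// infrastructure is reused and both the old and the new debugger are notified.
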
static port_id
install_team_debugger(team_id teamID, port_id debuggerPort,
	thread_id causingThread, bool useDefault, bool dontReplace)
{
	TRACE(("install_team_debugger(team: %" B_PRId32 ", port: %" B_PRId32 ", "
		"default: %d, dontReplace: %d)\n", teamID, debuggerPort, useDefault,
		dontReplace));

	if (useDefault)
		debuggerPort = atomic_get(&sDefaultDebuggerPort);

	// get the debugger team
	port_info debuggerPortInfo;
	status_t error = get_port_info(debuggerPort, &debuggerPortInfo);
	if (error != B_OK) {
		TRACE(("install_team_debugger(): Failed to get debugger port info: "
			"%" B_PRIx32 "\n", error));
		return error;
	}
	team_id debuggerTeam = debuggerPortInfo.team;

	// Check the debugger team: It must neither be the kernel team nor the
	// team to be debugged.
	if (debuggerTeam == team_get_kernel_team_id() || debuggerTeam == teamID) {
		TRACE(("install_team_debugger(): Can't debug kernel or debugger team. "
			"debugger: %" B_PRId32 ", debugged: %" B_PRId32 "\n", debuggerTeam,
			teamID));
		return B_NOT_ALLOWED;
	}

	// get the team
	Team* team;
	ConditionVariable debugChangeCondition;
	error = prepare_debugger_change(teamID, debugChangeCondition, team);
	if (error != B_OK)
		return error;

	// get the real team ID
	teamID = team->id;

	// check whether a debugger is already installed

	bool done = false;
	port_id result = B_ERROR;
	bool handOver = false;
	port_id oldDebuggerPort = -1;
	port_id nubPort = -1;

	TeamLocker teamLocker(team);
	cpu_status state = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 teamDebugFlags = team->debug_info.flags;

	if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// There's already a debugger installed.
		if (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_HANDOVER) {
			if (dontReplace) {
				// We're fine with already having a debugger.
				done = true;
				result = team->debug_info.nub_port;
			} else {
				// a handover to another debugger is requested
				// Set the handing-over flag -- we'll clear both flags after
				// having sent the handed-over message to the new debugger.
				atomic_or(&team->debug_info.flags,
					B_TEAM_DEBUG_DEBUGGER_HANDING_OVER);

				oldDebuggerPort = team->debug_info.debugger_port;
				result = nubPort = team->debug_info.nub_port;
				if (causingThread < 0)
					causingThread = team->debug_info.causing_thread;

				// set the new debugger
				install_team_debugger_init_debug_infos(team, debuggerTeam,
					debuggerPort, nubPort, team->debug_info.nub_thread,
					team->debug_info.debugger_write_lock, causingThread);

				handOver = true;
				done = true;
			}
		} else {
			// there's already a debugger installed
			error = (dontReplace ? B_OK : B_BAD_VALUE);
			done = true;
			result = team->debug_info.nub_port;
		}
	} else if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED) != 0
		&& useDefault) {
		// No debugger yet, disable_debugger() had been invoked, and we
		// would install the default debugger. Just fail.
		error = B_BAD_VALUE;
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(state);
	teamLocker.Unlock();

	if (handOver && set_port_owner(nubPort, debuggerTeam) != B_OK) {
		// The old debugger must just have died. Just proceed as
		// if there was no debugger installed. We may still be too
		// early, in which case we'll fail, but this race condition
		// should be unbelievably rare and relatively harmless.
		handOver = false;
		done = false;
	}

	if (handOver) {
		// prepare the handed-over message
		debug_handed_over notification;
		notification.origin.thread = -1;
		notification.origin.team = teamID;
		notification.origin.nub_port = nubPort;
		notification.debugger = debuggerTeam;
		notification.debugger_port = debuggerPort;
		notification.causing_thread = causingThread;

		// notify the new debugger
		error = write_port_etc(debuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			dprintf("install_team_debugger(): Failed to send message to new "
				"debugger: %s\n", strerror(error));
		}

		// clear the handed-over and handing-over flags
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		atomic_and(&team->debug_info.flags,
			~(B_TEAM_DEBUG_DEBUGGER_HANDOVER
				| B_TEAM_DEBUG_DEBUGGER_HANDING_OVER));

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);

		finish_debugger_change(team);

		// notify the nub thread
		kill_interruptable_write_port(nubPort, B_DEBUG_MESSAGE_HANDED_OVER,
			NULL, 0);

		// notify the old debugger
		error = write_port_etc(oldDebuggerPort,
			B_DEBUGGER_MESSAGE_HANDED_OVER, &notification,
			sizeof(notification), B_RELATIVE_TIMEOUT, 0);
		if (error != B_OK) {
			TRACE(("install_team_debugger(): Failed to send message to old "
				"debugger: %s\n", strerror(error)));
		}

		TRACE(("install_team_debugger() done: handed over to debugger: team: "
			"%" B_PRId32 ", port: %" B_PRId32 "\n", debuggerTeam,
			debuggerPort));

		return result;
	}

	if (done || error != B_OK) {
		TRACE(("install_team_debugger() done1: %" B_PRId32 "\n",
			(error == B_OK ? result : error)));
		finish_debugger_change(team);
		return (error == B_OK ? result : error);
	}

	// create the debugger write lock semaphore
	char nameBuffer[B_OS_NAME_LENGTH];
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debugger port "
		"write", teamID);
	sem_id debuggerWriteLock = create_sem(1, nameBuffer);
	if (debuggerWriteLock < 0)
		error = debuggerWriteLock;

	// create the nub port
	snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug", teamID);
	if (error == B_OK) {
		nubPort = create_port(1, nameBuffer);
		if (nubPort < 0)
			error = nubPort;
		else
			result = nubPort;
	}

	// make the debugger team the port owner; that way we know when the
	// debugger is gone and can clean up
	if (error == B_OK)
		error = set_port_owner(nubPort, debuggerTeam);

	// create the breakpoint manager
	BreakpointManager* breakpointManager = NULL;
	if (error == B_OK) {
		breakpointManager = new(std::nothrow) BreakpointManager;
		if (breakpointManager != NULL)
			error = breakpointManager->Init();
		else
			error = B_NO_MEMORY;
	}

	// spawn the nub thread
	thread_id nubThread = -1;
	if (error == B_OK) {
		snprintf(nameBuffer, sizeof(nameBuffer), "team %" B_PRId32 " debug task",
			teamID);
		nubThread = spawn_kernel_thread_etc(debug_nub_thread, nameBuffer,
			B_NORMAL_PRIORITY, NULL, teamID);
		if (nubThread < 0)
			error = nubThread;
	}

	// now adjust the debug info accordingly
	if (error == B_OK) {
		TeamLocker teamLocker(team);
		state = disable_interrupts();
		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

		team->debug_info.breakpoint_manager = breakpointManager;
		install_team_debugger_init_debug_infos(team, debuggerTeam,
			debuggerPort, nubPort, nubThread, debuggerWriteLock,
			causingThread);

		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
		restore_interrupts(state);
	}

	finish_debugger_change(team);

	// if everything went fine, resume the nub thread, otherwise clean up
	if (error == B_OK) {
		resume_thread(nubThread);
	} else {
		// delete port and terminate thread
		if (nubPort >= 0) {
			set_port_owner(nubPort, B_CURRENT_TEAM);
			delete_port(nubPort);
		}
		if (nubThread >= 0) {
			int32 result;
			wait_for_thread(nubThread, &result);
		}

		delete breakpointManager;
	}

	TRACE(("install_team_debugger() done2: %" B_PRId32 "\n",
		(error == B_OK ? result : error)));
	return (error == B_OK ? result : error);
}

static status_t
ensure_debugger_installed()
{
	port_id port = install_team_debugger(B_CURRENT_TEAM, -1,
		thread_get_current_thread_id(), true, true);
	return port >= 0 ? B_OK : port;
}

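// The default debugger port is typically the one registered by Haiku's
// debug_server through _user_install_default_debugger() below. A thread that
// triggers a debug event while its team has no debugger attached ends up here,
// so the default debugger is installed lazily for that team.
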
void
_user_debugger(const char *userMessage)
{
	// install the default debugger, if there is none yet
	status_t error = ensure_debugger_installed();
	if (error != B_OK) {
		// time to commit suicide
		char buffer[128];
		ssize_t length = user_strlcpy(buffer, userMessage, sizeof(buffer));
		if (length >= 0) {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"`%s'\n", buffer);
		} else {
			dprintf("_user_debugger(): Failed to install debugger. Message is: "
				"%p (%s)\n", userMessage, strerror(length));
		}
		_user_exit_team(1);
	}

	// prepare the message
	debug_debugger_call message;
	message.message = (void*)userMessage;

	thread_hit_debug_event(B_DEBUGGER_MESSAGE_DEBUGGER_CALL, &message,
		sizeof(message), true);
}

int
_user_disable_debugger(int state)
{
	Team *team = thread_get_current_thread()->team;

	TRACE(("_user_disable_debugger(%d): team: %" B_PRId32 "\n", state,
		team->id));

	cpu_status cpuState = disable_interrupts();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	int32 oldFlags;
	if (state) {
		oldFlags = atomic_or(&team->debug_info.flags,
			B_TEAM_DEBUG_DEBUGGER_DISABLED);
	} else {
		oldFlags = atomic_and(&team->debug_info.flags,
			~B_TEAM_DEBUG_DEBUGGER_DISABLED);
	}

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
	restore_interrupts(cpuState);

	// TODO: Check whether the return value is really the old state.
	return !(oldFlags & B_TEAM_DEBUG_DEBUGGER_DISABLED);
}

status_t
_user_install_default_debugger(port_id debuggerPort)
{
	// if supplied, check whether the port is a valid port
	if (debuggerPort >= 0) {
		port_info portInfo;
		status_t error = get_port_info(debuggerPort, &portInfo);
		if (error != B_OK)
			return error;

		// the debugger team must not be the kernel team
		if (portInfo.team == team_get_kernel_team_id())
			return B_NOT_ALLOWED;
	}

	atomic_set(&sDefaultDebuggerPort, debuggerPort);

	return B_OK;
}

port_id
_user_install_team_debugger(team_id teamID, port_id debuggerPort)
{
	return install_team_debugger(teamID, debuggerPort, -1, false, false);
}

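// A minimal sketch of how a userland debugger would typically reach this
// syscall, via the public install_team_debugger()/remove_team_debugger() API
// from <debugger.h>. The target team ID and the handling of further
// B_DEBUGGER_MESSAGE_* events are assumptions of the example, not part of this
// file:
//
//	port_id debuggerPort = create_port(100, "my debugger messages");
//	port_id nubPort = install_team_debugger(targetTeam, debuggerPort);
//	if (nubPort >= 0) {
//		debug_debugger_message_data message;
//		int32 code;
//		// every debug event for the team now arrives on debuggerPort
//		read_port(debuggerPort, &code, &message, sizeof(message));
//		// ...
//		remove_team_debugger(targetTeam);
//	}
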
status_t
_user_remove_team_debugger(team_id teamID)
{
	Team* team;
	ConditionVariable debugChangeCondition;
	status_t error = prepare_debugger_change(teamID, debugChangeCondition,
		team);
	if (error != B_OK)
		return error;

	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	thread_id nubThread = -1;
	port_id nubPort = -1;

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
		// there's a debugger installed
		nubThread = team->debug_info.nub_thread;
		nubPort = team->debug_info.nub_port;
	} else {
		// no debugger installed
		error = B_BAD_VALUE;
	}

	debugInfoLocker.Unlock();

	// Delete the nub port -- this will cause the nub thread to terminate and
	// remove the debugger.
	if (nubPort >= 0)
		delete_port(nubPort);

	finish_debugger_change(team);

	// wait for the nub thread
	if (nubThread >= 0)
		wait_for_thread(nubThread, NULL);

	return error;
}

status_t
_user_debug_thread(thread_id threadID)
{
	TRACE(("[%" B_PRId32 "] _user_debug_thread(%" B_PRId32 ")\n",
		find_thread(NULL), threadID));

	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// we can't debug the kernel team
	if (thread->team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	InterruptsLocker interruptsLocker;
	SpinLocker threadDebugInfoLocker(thread->debug_info.lock);

	// If the thread is already dying, it's too late to debug it.
	if ((thread->debug_info.flags & B_THREAD_DEBUG_DYING) != 0)
		return B_BAD_THREAD_ID;

	// don't debug the nub thread
	if ((thread->debug_info.flags & B_THREAD_DEBUG_NUB_THREAD) != 0)
		return B_NOT_ALLOWED;

	// already marked stopped or being told to stop?
	if ((thread->debug_info.flags
			& (B_THREAD_DEBUG_STOPPED | B_THREAD_DEBUG_STOP)) != 0) {
		return B_OK;
	}

	// set the flag that tells the thread to stop as soon as possible
	atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);

	update_thread_user_debug_flag(thread);

	// send the thread a SIGNAL_DEBUG_THREAD, so it is interrupted (or
	// notices the stop flag on its way back to userland, if it is in the
	// kernel already)
	threadDebugInfoLocker.Unlock();
	ReadSpinLocker teamLocker(thread->team_lock);
	SpinLocker locker(thread->team->signal_lock);

	send_signal_to_thread_locked(thread, SIGNAL_DEBUG_THREAD, NULL, 0);

	return B_OK;
}

void
_user_wait_for_debugger(void)
{
	debug_thread_debugged message;
	thread_hit_debug_event(B_DEBUGGER_MESSAGE_THREAD_DEBUGGED, &message,
		sizeof(message), false);
}

status_t
_user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
	bool watchpoint)
{
	// check the address and size
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;
	if (watchpoint && length < 0)
		return B_BAD_VALUE;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_NOT_ALLOWED;

	// We can't help it, here's a small but relatively harmless race condition,
	// since a debugger could be installed in the meantime. The worst case is
	// that we install a break/watchpoint the debugger doesn't know about.

	// set the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_set_watchpoint(address, type, length);
	else
		result = arch_set_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}

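// Note: the two breakpoint syscalls above and below let a team set or clear a
// single break- or watchpoint on itself while no debugger is attached. The
// point is programmed directly through the arch hooks here rather than through
// a BreakpointManager, which only exists once a debugger has been installed
// for the team (see install_team_debugger()).
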
status_t
_user_clear_debugger_breakpoint(void *address, bool watchpoint)
{
	// check the address
	if (address == NULL || !BreakpointManager::CanAccessAddress(address, false))
		return B_BAD_ADDRESS;

	// check whether a debugger is installed already
	team_debug_info teamDebugInfo;
	get_team_debug_info(teamDebugInfo);
	if (teamDebugInfo.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		return B_NOT_ALLOWED;

	// We can't help it, here's a small but relatively harmless race condition,
	// since a debugger could be installed in the meantime. The worst case is
	// that we clear a break/watchpoint the debugger has just installed.

	// clear the break/watchpoint
	status_t result;
	if (watchpoint)
		result = arch_clear_watchpoint(address);
	else
		result = arch_clear_breakpoint(address);

	if (result == B_OK)
		update_threads_breakpoints_flag();

	return result;
}