1 /* Ada Ravenscar thread support.
3 Copyright (C) 2004-2023 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "gdbthread.h"
27 #include "ravenscar-thread.h"
28 #include "observable.h"
33 #include <unordered_map>
35 /* This module provides support for "Ravenscar" tasks (Ada) when
36 debugging on bare-metal targets.
38 The typical situation is when debugging a bare-metal target over
39 the remote protocol. In that situation, the system does not know
40 about high-level concepts such as threads, only about some code
41 running on one or more CPUs. And since the remote protocol does not
42 provide any handling for CPUs, the de facto standard for handling
43 them is to have one thread per CPU, where the thread's ptid has
44 its lwp field set to the CPU number (eg: 1 for the first CPU,
45 2 for the second one, etc). This module will make that assumption.
47 This module then creates and maintains the list of threads based
48 on the list of Ada tasks, with one thread per Ada task. The convention
49 is that threads corresponding to the CPUs (see assumption above)
50 have a ptid_t of the form (PID, LWP, 0), while threads corresponding
51 to our Ada tasks have a ptid_t of the form (PID, 0, TID) where TID
52 is the Ada task's ID as extracted from Ada runtime information.
54 Switching to a given Ada task (or its underlying thread) is performed
55 by fetching the registers of that task from the memory area where
56 the registers were saved. For any of the other operations, the
57 operation is performed by first finding the CPU on which the task
58 is running, switching to its corresponding ptid, and then performing
59 the operation on that ptid using the target beneath us. */
61 /* If true, ravenscar task support is enabled. */
62 static bool ravenscar_task_support
= true;
64 static const char running_thread_name
[] = "__gnat_running_thread_table";
66 static const char known_tasks_name
[] = "system__tasking__debug__known_tasks";
67 static const char first_task_name
[] = "system__tasking__debug__first_task";
69 static const char ravenscar_runtime_initializer
[]
70 = "system__bb__threads__initialize";
72 static const target_info ravenscar_target_info
= {
74 N_("Ravenscar tasks."),
75 N_("Ravenscar tasks support.")
78 struct ravenscar_thread_target final
: public target_ops
80 ravenscar_thread_target ()
81 : m_base_ptid (inferior_ptid
)
85 const target_info
&info () const override
86 { return ravenscar_target_info
; }
88 strata
stratum () const override
{ return thread_stratum
; }
90 ptid_t
wait (ptid_t
, struct target_waitstatus
*, target_wait_flags
) override
;
91 void resume (ptid_t
, int, enum gdb_signal
) override
;
93 void fetch_registers (struct regcache
*, int) override
;
94 void store_registers (struct regcache
*, int) override
;
96 void prepare_to_store (struct regcache
*) override
;
98 bool stopped_by_sw_breakpoint () override
;
100 bool stopped_by_hw_breakpoint () override
;
102 bool stopped_by_watchpoint () override
;
104 bool stopped_data_address (CORE_ADDR
*) override
;
106 enum target_xfer_status
xfer_partial (enum target_object object
,
109 const gdb_byte
*writebuf
,
110 ULONGEST offset
, ULONGEST len
,
111 ULONGEST
*xfered_len
) override
;
113 bool thread_alive (ptid_t ptid
) override
;
115 int core_of_thread (ptid_t ptid
) override
;
117 void update_thread_list () override
;
119 std::string
pid_to_str (ptid_t
) override
;
121 ptid_t
get_ada_task_ptid (long lwp
, ULONGEST thread
) override
;
123 struct btrace_target_info
*enable_btrace (thread_info
*tp
,
124 const struct btrace_config
*conf
)
127 process_stratum_target
*proc_target
128 = as_process_stratum_target (this->beneath ());
129 ptid_t underlying
= get_base_thread_from_ravenscar_task (tp
->ptid
);
130 tp
= proc_target
->find_thread (underlying
);
132 return beneath ()->enable_btrace (tp
, conf
);
135 void mourn_inferior () override
;
137 void close () override
142 thread_info
*add_active_thread ();
146 /* PTID of the last thread that received an event.
147 This can be useful to determine the associated task that received
148 the event, to make it the current task. */
151 ptid_t
active_task (int cpu
);
152 bool task_is_currently_active (ptid_t ptid
);
153 bool runtime_initialized ();
154 int get_thread_base_cpu (ptid_t ptid
);
155 ptid_t
get_base_thread_from_ravenscar_task (ptid_t ptid
);
156 void add_thread (struct ada_task_info
*task
);
158 /* Like switch_to_thread, but uses the base ptid for the thread. */
159 void set_base_thread_from_ravenscar_task (ptid_t ptid
)
161 process_stratum_target
*proc_target
162 = as_process_stratum_target (this->beneath ());
163 ptid_t underlying
= get_base_thread_from_ravenscar_task (ptid
);
164 switch_to_thread (proc_target
->find_thread (underlying
));
167 /* Some targets use lazy FPU initialization. On these, the FP
168 registers for a given task might be uninitialized, or stored in
169 the per-task context, or simply be the live registers on the CPU.
170 This enum is used to encode this information. */
173 /* This target doesn't do anything special for FP registers -- if
174 any exist, they are treated just identical to non-FP
177 /* This target uses the lazy FP scheme, and the FP registers are
178 taken from the CPU. This can happen for any task, because if a
179 task switch occurs, the registers aren't immediately written to
180 the per-task context -- this is deferred until the current task
181 causes an FPU trap. */
183 /* This target uses the lazy FP scheme, and the FP registers are
184 not available. Maybe this task never initialized the FPU, or
185 maybe GDB couldn't find the required symbol. */
189 /* Return the FPU state. */
190 fpu_state
get_fpu_state (struct regcache
*regcache
,
191 const ravenscar_arch_ops
*arch_ops
);
193 /* This maps a TID to the CPU on which it was running. This is
194 needed because sometimes the runtime will report an active task
195 that hasn't yet been put on the list of tasks that is read by
197 std::unordered_map
<ULONGEST
, int> m_cpu_map
;
200 /* Return true iff PTID corresponds to a ravenscar task. */
203 is_ravenscar_task (ptid_t ptid
)
205 /* By construction, ravenscar tasks have their LWP set to zero.
206 Also make sure that the TID is nonzero, as some remotes, when
207 asked for the list of threads, will return the first thread
208 as having its TID set to zero. For instance, TSIM version
209 2.0.48 for LEON3 sends 'm0' as a reply to the 'qfThreadInfo'
210 query, which the remote protocol layer then treats as a thread
211 whose TID is 0. This is obviously not a ravenscar task. */
212 return ptid
.lwp () == 0 && ptid
.tid () != 0;
215 /* Given PTID, which can be either a ravenscar task or a CPU thread,
216 return which CPU that ptid is running on.
218 This assume that PTID is a valid ptid_t. Otherwise, a gdb_assert
219 will be triggered. */
222 ravenscar_thread_target::get_thread_base_cpu (ptid_t ptid
)
226 if (is_ravenscar_task (ptid
))
228 /* Prefer to not read inferior memory if possible, to avoid
229 reentrancy problems with xfer_partial. */
230 auto iter
= m_cpu_map
.find (ptid
.tid ());
232 if (iter
!= m_cpu_map
.end ())
233 base_cpu
= iter
->second
;
236 struct ada_task_info
*task_info
= ada_get_task_info_from_ptid (ptid
);
238 gdb_assert (task_info
!= NULL
);
239 base_cpu
= task_info
->base_cpu
;
244 /* We assume that the LWP of the PTID is equal to the CPU number. */
245 base_cpu
= ptid
.lwp ();
251 /* Given a ravenscar task (identified by its ptid_t PTID), return true
252 if this task is the currently active task on the cpu that task is
255 In other words, this function determine which CPU this task is
256 currently running on, and then return nonzero if the CPU in question
257 is executing the code for that task. If that's the case, then
258 that task's registers are in the CPU bank. Otherwise, the task
259 is currently suspended, and its registers have been saved in memory. */
262 ravenscar_thread_target::task_is_currently_active (ptid_t ptid
)
264 ptid_t active_task_ptid
= active_task (get_thread_base_cpu (ptid
));
266 return ptid
== active_task_ptid
;
269 /* Return the CPU thread (as a ptid_t) on which the given ravenscar
272 This is the thread that corresponds to the CPU on which the task
276 ravenscar_thread_target::get_base_thread_from_ravenscar_task (ptid_t ptid
)
280 if (!is_ravenscar_task (ptid
))
283 base_cpu
= get_thread_base_cpu (ptid
);
284 return ptid_t (ptid
.pid (), base_cpu
);
287 /* Fetch the ravenscar running thread from target memory, make sure
288 there's a corresponding thread in the thread list, and return it.
289 If the runtime is not initialized, return NULL. */
292 ravenscar_thread_target::add_active_thread ()
294 process_stratum_target
*proc_target
295 = as_process_stratum_target (this->beneath ());
299 gdb_assert (!is_ravenscar_task (m_base_ptid
));
300 base_cpu
= get_thread_base_cpu (m_base_ptid
);
302 if (!runtime_initialized ())
305 /* It's possible for runtime_initialized to return true but for it
306 not to be fully initialized. For example, this can happen for a
307 breakpoint placed at the task's beginning. */
308 ptid_t active_ptid
= active_task (base_cpu
);
309 if (active_ptid
== null_ptid
)
312 /* The running thread may not have been added to
313 system.tasking.debug's list yet; so ravenscar_update_thread_list
314 may not always add it to the thread list. Add it here. */
315 thread_info
*active_thr
= proc_target
->find_thread (active_ptid
);
316 if (active_thr
== nullptr)
318 active_thr
= ::add_thread (proc_target
, active_ptid
);
319 m_cpu_map
[active_ptid
.tid ()] = base_cpu
;
324 /* The Ravenscar Runtime exports a symbol which contains the ID of
325 the thread that is currently running. Try to locate that symbol
326 and return its associated minimal symbol.
327 Return NULL if not found. */
329 static struct bound_minimal_symbol
330 get_running_thread_msymbol ()
332 struct bound_minimal_symbol msym
;
334 msym
= lookup_minimal_symbol (running_thread_name
, NULL
, NULL
);
336 /* Older versions of the GNAT runtime were using a different
337 (less ideal) name for the symbol where the active thread ID
338 is stored. If we couldn't find the symbol using the latest
339 name, then try the old one. */
340 msym
= lookup_minimal_symbol ("running_thread", NULL
, NULL
);
345 /* Return True if the Ada Ravenscar run-time can be found in the
349 has_ravenscar_runtime ()
351 struct bound_minimal_symbol msym_ravenscar_runtime_initializer
352 = lookup_minimal_symbol (ravenscar_runtime_initializer
, NULL
, NULL
);
353 struct bound_minimal_symbol msym_known_tasks
354 = lookup_minimal_symbol (known_tasks_name
, NULL
, NULL
);
355 struct bound_minimal_symbol msym_first_task
356 = lookup_minimal_symbol (first_task_name
, NULL
, NULL
);
357 struct bound_minimal_symbol msym_running_thread
358 = get_running_thread_msymbol ();
360 return (msym_ravenscar_runtime_initializer
.minsym
361 && (msym_known_tasks
.minsym
|| msym_first_task
.minsym
)
362 && msym_running_thread
.minsym
);
365 /* Return True if the Ada Ravenscar run-time can be found in the
366 application, and if it has been initialized on target. */
369 ravenscar_thread_target::runtime_initialized ()
371 return active_task (1) != null_ptid
;
374 /* Return the ID of the thread that is currently running.
375 Return 0 if the ID could not be determined. */
378 get_running_thread_id (int cpu
)
380 struct bound_minimal_symbol object_msym
= get_running_thread_msymbol ();
384 CORE_ADDR object_addr
;
385 struct type
*builtin_type_void_data_ptr
386 = builtin_type (current_inferior ()->arch ())->builtin_data_ptr
;
388 if (!object_msym
.minsym
)
391 object_size
= builtin_type_void_data_ptr
->length ();
392 object_addr
= (object_msym
.value_address ()
393 + (cpu
- 1) * object_size
);
394 buf_size
= object_size
;
395 buf
= (gdb_byte
*) alloca (buf_size
);
396 read_memory (object_addr
, buf
, buf_size
);
397 return extract_typed_address (buf
, builtin_type_void_data_ptr
);
401 ravenscar_thread_target::resume (ptid_t ptid
, int step
,
402 enum gdb_signal siggnal
)
404 /* If we see a wildcard resume, we simply pass that on. Otherwise,
405 arrange to resume the base ptid. */
406 inferior_ptid
= m_base_ptid
;
409 /* We only have one process, so resume all threads of it. */
410 ptid
= minus_one_ptid
;
412 else if (ptid
!= minus_one_ptid
)
414 beneath ()->resume (ptid
, step
, siggnal
);
418 ravenscar_thread_target::wait (ptid_t ptid
,
419 struct target_waitstatus
*status
,
420 target_wait_flags options
)
422 process_stratum_target
*beneath
423 = as_process_stratum_target (this->beneath ());
426 if (ptid
!= minus_one_ptid
)
428 event_ptid
= beneath
->wait (ptid
, status
, 0);
429 /* Find any new threads that might have been created, and return the
432 Only do it if the program is still alive, though. Otherwise,
433 this causes problems when debugging through the remote protocol,
434 because we might try switching threads (and thus sending packets)
435 after the remote has disconnected. */
436 if (status
->kind () != TARGET_WAITKIND_EXITED
437 && status
->kind () != TARGET_WAITKIND_SIGNALLED
438 && runtime_initialized ())
440 m_base_ptid
= event_ptid
;
441 this->update_thread_list ();
442 thread_info
*thr
= this->add_active_thread ();
449 /* Add the thread associated to the given TASK to the thread list
450 (if the thread has already been added, this is a no-op). */
453 ravenscar_thread_target::add_thread (struct ada_task_info
*task
)
455 if (current_inferior ()->find_thread (task
->ptid
) == NULL
)
457 ::add_thread (current_inferior ()->process_target (), task
->ptid
);
458 m_cpu_map
[task
->ptid
.tid ()] = task
->base_cpu
;
463 ravenscar_thread_target::update_thread_list ()
465 /* iterate_over_live_ada_tasks requires that inferior_ptid be set,
466 but this isn't always the case in target methods. So, we ensure
468 scoped_restore save_ptid
= make_scoped_restore (&inferior_ptid
,
471 /* Do not clear the thread list before adding the Ada task, to keep
472 the thread that the process stratum has included into it
473 (m_base_ptid) and the running thread, that may not have been included
474 to system.tasking.debug's list yet. */
476 iterate_over_live_ada_tasks ([this] (struct ada_task_info
*task
)
478 this->add_thread (task
);
483 ravenscar_thread_target::active_task (int cpu
)
485 CORE_ADDR tid
= get_running_thread_id (cpu
);
490 return ptid_t (m_base_ptid
.pid (), 0, tid
);
494 ravenscar_thread_target::thread_alive (ptid_t ptid
)
496 /* Ravenscar tasks are non-terminating. */
501 ravenscar_thread_target::pid_to_str (ptid_t ptid
)
503 if (!is_ravenscar_task (ptid
))
504 return beneath ()->pid_to_str (ptid
);
506 return string_printf ("Ravenscar Thread 0x%s",
507 phex_nz (ptid
.tid (), sizeof (ULONGEST
)));
511 ravenscar_arch_ops::get_stack_base (struct regcache
*regcache
) const
513 struct gdbarch
*gdbarch
= regcache
->arch ();
514 const int sp_regnum
= gdbarch_sp_regnum (gdbarch
);
515 ULONGEST stack_address
;
516 regcache_cooked_read_unsigned (regcache
, sp_regnum
, &stack_address
);
517 return (CORE_ADDR
) stack_address
;
521 ravenscar_arch_ops::supply_one_register (struct regcache
*regcache
,
523 CORE_ADDR descriptor
,
524 CORE_ADDR stack_base
) const
527 if (regnum
>= first_stack_register
&& regnum
<= last_stack_register
)
531 addr
+= offsets
[regnum
];
533 struct gdbarch
*gdbarch
= regcache
->arch ();
534 int size
= register_size (gdbarch
, regnum
);
535 gdb_byte
*buf
= (gdb_byte
*) alloca (size
);
536 read_memory (addr
, buf
, size
);
537 regcache
->raw_supply (regnum
, buf
);
541 ravenscar_arch_ops::fetch_register (struct regcache
*regcache
,
544 gdb_assert (regnum
!= -1);
546 struct gdbarch
*gdbarch
= regcache
->arch ();
547 /* The tid is the thread_id field, which is a pointer to the thread. */
548 CORE_ADDR thread_descriptor_address
549 = (CORE_ADDR
) regcache
->ptid ().tid ();
552 CORE_ADDR stack_address
= 0;
553 if (regnum
>= first_stack_register
&& regnum
<= last_stack_register
)
555 /* We must supply SP for get_stack_base, so recurse. */
556 sp_regno
= gdbarch_sp_regnum (gdbarch
);
557 gdb_assert (!(sp_regno
>= first_stack_register
558 && sp_regno
<= last_stack_register
));
559 fetch_register (regcache
, sp_regno
);
560 stack_address
= get_stack_base (regcache
);
563 if (regnum
< offsets
.size () && offsets
[regnum
] != -1)
564 supply_one_register (regcache
, regnum
, thread_descriptor_address
,
569 ravenscar_arch_ops::store_one_register (struct regcache
*regcache
, int regnum
,
570 CORE_ADDR descriptor
,
571 CORE_ADDR stack_base
) const
574 if (regnum
>= first_stack_register
&& regnum
<= last_stack_register
)
578 addr
+= offsets
[regnum
];
580 struct gdbarch
*gdbarch
= regcache
->arch ();
581 int size
= register_size (gdbarch
, regnum
);
582 gdb_byte
*buf
= (gdb_byte
*) alloca (size
);
583 regcache
->raw_collect (regnum
, buf
);
584 write_memory (addr
, buf
, size
);
588 ravenscar_arch_ops::store_register (struct regcache
*regcache
,
591 gdb_assert (regnum
!= -1);
593 /* The tid is the thread_id field, which is a pointer to the thread. */
594 CORE_ADDR thread_descriptor_address
595 = (CORE_ADDR
) regcache
->ptid ().tid ();
597 CORE_ADDR stack_address
= 0;
598 if (regnum
>= first_stack_register
&& regnum
<= last_stack_register
)
599 stack_address
= get_stack_base (regcache
);
601 if (regnum
< offsets
.size () && offsets
[regnum
] != -1)
602 store_one_register (regcache
, regnum
, thread_descriptor_address
,
606 /* Temporarily set the ptid of a regcache to some other value. When
607 this object is destroyed, the regcache's original ptid is
610 class temporarily_change_regcache_ptid
614 temporarily_change_regcache_ptid (struct regcache
*regcache
, ptid_t new_ptid
)
615 : m_regcache (regcache
),
616 m_save_ptid (regcache
->ptid ())
618 m_regcache
->set_ptid (new_ptid
);
621 ~temporarily_change_regcache_ptid ()
623 m_regcache
->set_ptid (m_save_ptid
);
629 struct regcache
*m_regcache
;
630 /* The saved ptid. */
634 ravenscar_thread_target::fpu_state
635 ravenscar_thread_target::get_fpu_state (struct regcache
*regcache
,
636 const ravenscar_arch_ops
*arch_ops
)
638 /* We want to return true if the special FP register handling is
639 needed. If this target doesn't have lazy FP, then no special
640 treatment is ever needed. */
641 if (!arch_ops
->on_demand_fp ())
642 return NOTHING_SPECIAL
;
644 bound_minimal_symbol fpu_context
645 = lookup_minimal_symbol ("system__bb__cpu_primitives__current_fpu_context",
647 /* If the symbol can't be found, just fall back. */
648 if (fpu_context
.minsym
== nullptr)
649 return NO_FP_REGISTERS
;
652 = builtin_type (current_inferior ()->arch ())->builtin_data_ptr
;
653 ptr_type
= lookup_pointer_type (ptr_type
);
654 value
*val
= value_from_pointer (ptr_type
, fpu_context
.value_address ());
656 int cpu
= get_thread_base_cpu (regcache
->ptid ());
657 /* The array index type has a lower bound of 1 -- it is Ada code --
658 so subtract 1 here. */
659 val
= value_ptradd (val
, cpu
- 1);
661 val
= value_ind (val
);
662 CORE_ADDR fpu_task
= value_as_long (val
);
664 /* The tid is the thread_id field, which is a pointer to the thread. */
665 CORE_ADDR thread_descriptor_address
666 = (CORE_ADDR
) regcache
->ptid ().tid ();
667 if (fpu_task
== (thread_descriptor_address
668 + arch_ops
->get_fpu_context_offset ()))
669 return LIVE_FP_REGISTERS
;
671 int v_init_offset
= arch_ops
->get_v_init_offset ();
673 read_memory (thread_descriptor_address
+ v_init_offset
, &init
, 1);
674 return init
? NOTHING_SPECIAL
: NO_FP_REGISTERS
;
678 ravenscar_thread_target::fetch_registers (struct regcache
*regcache
,
681 ptid_t ptid
= regcache
->ptid ();
683 if (runtime_initialized () && is_ravenscar_task (ptid
))
685 struct gdbarch
*gdbarch
= regcache
->arch ();
686 bool is_active
= task_is_currently_active (ptid
);
687 struct ravenscar_arch_ops
*arch_ops
= gdbarch_ravenscar_ops (gdbarch
);
688 std::optional
<fpu_state
> fp_state
;
690 int low_reg
= regnum
== -1 ? 0 : regnum
;
691 int high_reg
= regnum
== -1 ? gdbarch_num_regs (gdbarch
) : regnum
+ 1;
693 ptid_t base
= get_base_thread_from_ravenscar_task (ptid
);
694 for (int i
= low_reg
; i
< high_reg
; ++i
)
696 bool use_beneath
= false;
697 if (arch_ops
->is_fp_register (i
))
699 if (!fp_state
.has_value ())
700 fp_state
= get_fpu_state (regcache
, arch_ops
);
701 if (*fp_state
== NO_FP_REGISTERS
)
703 if (*fp_state
== LIVE_FP_REGISTERS
704 || (is_active
&& *fp_state
== NOTHING_SPECIAL
))
708 use_beneath
= is_active
;
712 temporarily_change_regcache_ptid
changer (regcache
, base
);
713 beneath ()->fetch_registers (regcache
, i
);
716 arch_ops
->fetch_register (regcache
, i
);
720 beneath ()->fetch_registers (regcache
, regnum
);
724 ravenscar_thread_target::store_registers (struct regcache
*regcache
,
727 ptid_t ptid
= regcache
->ptid ();
729 if (runtime_initialized () && is_ravenscar_task (ptid
))
731 struct gdbarch
*gdbarch
= regcache
->arch ();
732 bool is_active
= task_is_currently_active (ptid
);
733 struct ravenscar_arch_ops
*arch_ops
= gdbarch_ravenscar_ops (gdbarch
);
734 std::optional
<fpu_state
> fp_state
;
736 int low_reg
= regnum
== -1 ? 0 : regnum
;
737 int high_reg
= regnum
== -1 ? gdbarch_num_regs (gdbarch
) : regnum
+ 1;
739 ptid_t base
= get_base_thread_from_ravenscar_task (ptid
);
740 for (int i
= low_reg
; i
< high_reg
; ++i
)
742 bool use_beneath
= false;
743 if (arch_ops
->is_fp_register (i
))
745 if (!fp_state
.has_value ())
746 fp_state
= get_fpu_state (regcache
, arch_ops
);
747 if (*fp_state
== NO_FP_REGISTERS
)
749 if (*fp_state
== LIVE_FP_REGISTERS
750 || (is_active
&& *fp_state
== NOTHING_SPECIAL
))
754 use_beneath
= is_active
;
758 temporarily_change_regcache_ptid
changer (regcache
, base
);
759 beneath ()->store_registers (regcache
, i
);
762 arch_ops
->store_register (regcache
, i
);
766 beneath ()->store_registers (regcache
, regnum
);
770 ravenscar_thread_target::prepare_to_store (struct regcache
*regcache
)
772 ptid_t ptid
= regcache
->ptid ();
774 if (runtime_initialized () && is_ravenscar_task (ptid
))
776 if (task_is_currently_active (ptid
))
778 ptid_t base
= get_base_thread_from_ravenscar_task (ptid
);
779 temporarily_change_regcache_ptid
changer (regcache
, base
);
780 beneath ()->prepare_to_store (regcache
);
788 beneath ()->prepare_to_store (regcache
);
791 /* Implement the to_stopped_by_sw_breakpoint target_ops "method". */
794 ravenscar_thread_target::stopped_by_sw_breakpoint ()
796 scoped_restore_current_thread saver
;
797 set_base_thread_from_ravenscar_task (inferior_ptid
);
798 return beneath ()->stopped_by_sw_breakpoint ();
801 /* Implement the to_stopped_by_hw_breakpoint target_ops "method". */
804 ravenscar_thread_target::stopped_by_hw_breakpoint ()
806 scoped_restore_current_thread saver
;
807 set_base_thread_from_ravenscar_task (inferior_ptid
);
808 return beneath ()->stopped_by_hw_breakpoint ();
811 /* Implement the to_stopped_by_watchpoint target_ops "method". */
814 ravenscar_thread_target::stopped_by_watchpoint ()
816 scoped_restore_current_thread saver
;
817 set_base_thread_from_ravenscar_task (inferior_ptid
);
818 return beneath ()->stopped_by_watchpoint ();
821 /* Implement the to_stopped_data_address target_ops "method". */
824 ravenscar_thread_target::stopped_data_address (CORE_ADDR
*addr_p
)
826 scoped_restore_current_thread saver
;
827 set_base_thread_from_ravenscar_task (inferior_ptid
);
828 return beneath ()->stopped_data_address (addr_p
);
832 ravenscar_thread_target::mourn_inferior ()
834 m_base_ptid
= null_ptid
;
835 target_ops
*beneath
= this->beneath ();
836 current_inferior ()->unpush_target (this);
837 beneath
->mourn_inferior ();
840 /* Implement the to_core_of_thread target_ops "method". */
843 ravenscar_thread_target::core_of_thread (ptid_t ptid
)
845 scoped_restore_current_thread saver
;
846 set_base_thread_from_ravenscar_task (inferior_ptid
);
847 return beneath ()->core_of_thread (inferior_ptid
);
850 /* Implement the target xfer_partial method. */
852 enum target_xfer_status
853 ravenscar_thread_target::xfer_partial (enum target_object object
,
856 const gdb_byte
*writebuf
,
857 ULONGEST offset
, ULONGEST len
,
858 ULONGEST
*xfered_len
)
860 scoped_restore save_ptid
= make_scoped_restore (&inferior_ptid
);
861 /* Calling get_base_thread_from_ravenscar_task can read memory from
862 the inferior. However, that function is written to prefer our
863 internal map, so it should not result in recursive calls in
865 inferior_ptid
= get_base_thread_from_ravenscar_task (inferior_ptid
);
866 return beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
867 offset
, len
, xfered_len
);
870 /* Observer on inferior_created: push ravenscar thread stratum if needed. */
873 ravenscar_inferior_created (inferior
*inf
)
877 if (!ravenscar_task_support
878 || gdbarch_ravenscar_ops (current_inferior ()->arch ()) == NULL
879 || !has_ravenscar_runtime ())
882 err_msg
= ada_get_tcb_types_info ();
885 warning (_("%s. Task/thread support disabled."), err_msg
);
889 ravenscar_thread_target
*rtarget
= new ravenscar_thread_target ();
890 inf
->push_target (target_ops_up (rtarget
));
891 thread_info
*thr
= rtarget
->add_active_thread ();
893 switch_to_thread (thr
);
897 ravenscar_thread_target::get_ada_task_ptid (long lwp
, ULONGEST thread
)
899 return ptid_t (m_base_ptid
.pid (), 0, thread
);
/* Command-list for the "set/show ravenscar" prefix command.  */
static struct cmd_list_element *set_ravenscar_list;
static struct cmd_list_element *show_ravenscar_list;
906 /* Implement the "show ravenscar task-switching" command. */
909 show_ravenscar_task_switching_command (struct ui_file
*file
, int from_tty
,
910 struct cmd_list_element
*c
,
913 if (ravenscar_task_support
)
914 gdb_printf (file
, _("\
915 Support for Ravenscar task/thread switching is enabled\n"));
917 gdb_printf (file
, _("\
918 Support for Ravenscar task/thread switching is disabled\n"));
921 /* Module startup initialization function, automagically called by
924 void _initialize_ravenscar ();
926 _initialize_ravenscar ()
928 /* Notice when the inferior is created in order to push the
929 ravenscar ops if needed. */
930 gdb::observers::inferior_created
.attach (ravenscar_inferior_created
,
933 add_setshow_prefix_cmd
934 ("ravenscar", no_class
,
935 _("Prefix command for changing Ravenscar-specific settings."),
936 _("Prefix command for showing Ravenscar-specific settings."),
937 &set_ravenscar_list
, &show_ravenscar_list
,
938 &setlist
, &showlist
);
940 add_setshow_boolean_cmd ("task-switching", class_obscure
,
941 &ravenscar_task_support
, _("\
942 Enable or disable support for GNAT Ravenscar tasks."), _("\
943 Show whether support for GNAT Ravenscar tasks is enabled."),
945 Enable or disable support for task/thread switching with the GNAT\n\
946 Ravenscar run-time library for bareboard configuration."),
947 NULL
, show_ravenscar_task_switching_command
,
948 &set_ravenscar_list
, &show_ravenscar_list
);