/* Target operations for the remote server for GDB.
   Copyright (C) 2002, 2004, 2005, 2011
   Free Software Foundation, Inc.

   Contributed by MontaVista Software.

   This file is part of GDB.
   It has been modified to integrate it in valgrind

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "server.h"
#include "target.h"
#include "regdef.h"
#include "regcache.h"
#include "valgrind_low.h"
#include "gdb/signals.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_machine.h"
#include "pub_core_threadstate.h"
#include "pub_core_transtab.h"
#include "pub_core_gdbserver.h"
#include "pub_core_debuginfo.h"

/* the_low_target defines the architecture-specific aspects, depending
   on the CPU. */
static struct valgrind_target_ops the_low_target;

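/* Format a ptid as "id <ptid>" for tracing.  Note: this returns a
   pointer to a static buffer, so the result is only valid until the
   next call. */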
static
char *image_ptid(unsigned long ptid)
{
   static char result[50];    // large enough
   VG_(sprintf) (result, "id %lu", ptid);
   return result;
}

#define get_thread(inf) ((struct thread_info *)(inf))
static
void remove_thread_if_not_in_vg_threads (struct inferior_list_entry *inf)
{
   struct thread_info *thread = get_thread (inf);
   if (!VG_(lwpid_to_vgtid)(thread_to_gdb_id(thread))) {
      dlog(1, "removing gdb ptid %s\n",
           image_ptid(thread_to_gdb_id(thread)));
      remove_thread (thread);
   }
}

/* Synchronize the threads known by valgrind and the threads known by
   gdbserver. */
static
void valgrind_update_threads (int pid)
{
   ThreadId tid;
   ThreadState *ts;
   unsigned long ptid;
   struct thread_info *ti;

   /* call remove_thread for all gdb threads not in valgrind threads */
   for_each_inferior (&all_threads, remove_thread_if_not_in_vg_threads);

   /* call add_thread for all valgrind threads not known in gdb all_threads */
   for (tid = 1; tid < VG_N_THREADS; tid++) {

#define LOCAL_THREAD_TRACE " ti* %p vgtid %u status %s as gdb ptid %s lwpid %d\n", \
      ti, tid, VG_(name_of_ThreadStatus) (ts->status), \
      image_ptid (ptid), ts->os_state.lwpid

      if (VG_(is_valid_tid) (tid)) {
         ts = VG_(get_ThreadState) (tid);
         ptid = ts->os_state.lwpid;
         ti = gdb_id_to_thread (ptid);
         if (!ti) {
            /* We do not report threads that are not yet fully initialized;
               otherwise this creates duplicate threads in gdb: once with
               pid xxx lwpid 0, then again with pid xxx lwpid yyy. */
            if (ts->status != VgTs_Init) {
               dlog(1, "adding_thread" LOCAL_THREAD_TRACE);
               add_thread (ptid, ts, ptid);
            }
         } else {
            dlog(2, "(known thread)" LOCAL_THREAD_TRACE);
         }
      }
#undef LOCAL_THREAD_TRACE
   }
}

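/* Builds the register description used in shadow mode: the n guest
   registers are tripled into guest/shadow1/shadow2 banks.  Entries
   0..n-1 keep the original names, entries n..2n-1 get the "s1" suffix
   and entries 2n..3n-1 the "s2" suffix; each shadow bank is offset by
   the size (in bits) of one full register set. */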
static
struct reg* build_shadow_arch (struct reg *reg_defs, int n) {
   int i, r;
   static const char *postfix[3] = { "", "s1", "s2" };
   struct reg *new_regs = malloc(3 * n * sizeof(reg_defs[0]));
   int reg_set_len = reg_defs[n-1].offset + reg_defs[n-1].size;

   for (i = 0; i < 3; i++) {
      for (r = 0; r < n; r++) {
         char *regname = malloc(strlen(reg_defs[r].name)
                                + strlen (postfix[i]) + 1);
         strcpy (regname, reg_defs[r].name);
         strcat (regname, postfix[i]);
         new_regs[i*n + r].name = regname;
         new_regs[i*n + r].offset = i*reg_set_len + reg_defs[r].offset;
         new_regs[i*n + r].size = reg_defs[r].size;
         dlog(1,
              "%-10s Nr %d offset(bit) %d offset(byte) %d size(bit) %d\n",
              new_regs[i*n + r].name, i*n + r, new_regs[i*n + r].offset,
              (new_regs[i*n + r].offset) / 8, new_regs[i*n + r].size);
      }
   }

   return new_regs;
}

static CORE_ADDR stopped_data_address = 0;
void VG_(set_watchpoint_stop_address) (Addr addr)
{
   stopped_data_address = addr;
}

int valgrind_stopped_by_watchpoint (void)
{
   return stopped_data_address != 0;
}

CORE_ADDR valgrind_stopped_data_address (void)
{
   return stopped_data_address;
}

/* pc at which we last stopped */
static CORE_ADDR stop_pc;

/* pc at which we resume.
   If stop_pc != resume_pc, it means gdb/gdbserver has changed the pc,
   either to "continue by jumping at that address" or to "continue at
   that address to call some code from gdb". */
static CORE_ADDR resume_pc;

static vki_siginfo_t vki_signal_to_report;
static vki_siginfo_t vki_signal_to_deliver;

void gdbserver_signal_encountered (const vki_siginfo_t *info)
{
   vki_signal_to_report = *info;
   vki_signal_to_deliver = *info;
}

void gdbserver_pending_signal_to_report (vki_siginfo_t *info)
{
   *info = vki_signal_to_report;
}

Bool gdbserver_deliver_signal (vki_siginfo_t *info)
{
   if (info->si_signo != vki_signal_to_deliver.si_signo)
      dlog(1, "GDB changed signal info %d to_report %d to_deliver %d\n",
           info->si_signo, vki_signal_to_report.si_signo,
           vki_signal_to_deliver.si_signo);
   *info = vki_signal_to_deliver;
   return vki_signal_to_deliver.si_signo != 0;
}

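/* Note: gdbserver_deliver_signal returns True if a signal (possibly
   changed by GDB in its resume request) must actually be delivered to
   the guest; a si_signo of 0 means GDB asked for the signal to be
   discarded. */
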
static Bool before_syscall;
static Int sysno_to_report = -1;
void gdbserver_syscall_encountered (Bool before, Int sysno)
{
   before_syscall = before;
   sysno_to_report = sysno;
}

Int valgrind_stopped_by_syscall (void)
{
   return sysno_to_report;
}

Bool valgrind_stopped_before_syscall(void)
{
   vg_assert (sysno_to_report >= 0);
   return before_syscall;
}

static unsigned char exit_status_to_report;
static int exit_code_to_report;
void gdbserver_process_exit_encountered (unsigned char status, Int code)
{
   vg_assert (status == 'W' || status == 'X');
   exit_status_to_report = status;
   exit_code_to_report = code;
}

static
const HChar* sym (Addr addr)
{
   // Only used for tracing/debugging, so the current epoch is a
   // reasonable choice.
   const DiEpoch cur_ep = VG_(current_DiEpoch)();

   return VG_(describe_IP) (cur_ep, addr, NULL);
}

ThreadId vgdb_interrupted_tid = 0;

/* 0 => not single stepping.
   1 => single stepping asked by gdb
   2 => single stepping asked by valgrind (watchpoint) */
static int stepping = 0;

Addr valgrind_get_ignore_break_once(void)
{
   if (valgrind_single_stepping())
      return resume_pc;
   else
      return 0;
}

void valgrind_set_single_stepping(Bool set)
{
   if (set)
      stepping = 2;
   else
      stepping = 0;
}

Bool valgrind_single_stepping(void)
{
   if (stepping)
      return True;
   else
      return False;
}

int valgrind_thread_alive (unsigned long tid)
{
   struct thread_info *ti = gdb_id_to_thread(tid);
   ThreadState *tst;

   if (ti != NULL) {
      tst = (ThreadState *) inferior_target_data (ti);
      return tst->status != VgTs_Zombie;
   }
   else {
      return 0;
   }
}

void valgrind_resume (struct thread_resume *resume_info)
{
   dlog(1,
        "resume_info step %d sig %d stepping %d\n",
        resume_info->step,
        resume_info->sig,
        stepping);
   if (valgrind_stopped_by_watchpoint()) {
      dlog(1, "clearing watchpoint stopped_data_address %p\n",
           C2v(stopped_data_address));
      VG_(set_watchpoint_stop_address) ((Addr) 0);
   }
   if (valgrind_stopped_by_syscall () >= 0) {
      dlog(1, "clearing stopped by syscall %d\n",
           valgrind_stopped_by_syscall ());
      gdbserver_syscall_encountered (False, -1);
   }

   vki_signal_to_deliver.si_signo = resume_info->sig;
   /* The signal was reported to GDB and GDB told us to resume execution,
      so reset the signal to report to 0. */
   VG_(memset) (&vki_signal_to_report, 0, sizeof(vki_signal_to_report));

   stepping = resume_info->step;
   resume_pc = (*the_low_target.get_pc) ();
   if (resume_pc != stop_pc) {
      dlog(1,
           "stop_pc %p changed to be resume_pc %s\n",
           C2v(stop_pc), sym(resume_pc));
   }
}

unsigned char valgrind_wait (char *ourstatus)
{
   int pid;
   unsigned long wptid;
   ThreadState *tst;
   enum target_signal sig;
   int code;

   pid = VG_(getpid) ();
   dlog(1, "enter valgrind_wait pid %d\n", pid);

   valgrind_update_threads(pid);

   /* First see if we are done with this process. */
   if (exit_status_to_report != 0) {
      *ourstatus = exit_status_to_report;
      exit_status_to_report = 0;

      if (*ourstatus == 'W') {
         code = exit_code_to_report;
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status W exit code %d\n", code);
         return code;
      }

      if (*ourstatus == 'X') {
         sig = target_signal_from_host(exit_code_to_report);
         exit_code_to_report = 0;
         dlog(1, "exit valgrind_wait status X signal %u\n", sig);
         return sig;
      }
   }

   /* In valgrind, we consider that a wait always succeeds with STOPPED 'T'
      and with a signal TRAP (i.e. a breakpoint), unless there is a signal
      to report. */
   *ourstatus = 'T';
   if (vki_signal_to_report.si_signo == 0)
      sig = TARGET_SIGNAL_TRAP;
   else
      sig = target_signal_from_host(vki_signal_to_report.si_signo);

   if (vgdb_interrupted_tid != 0)
      tst = VG_(get_ThreadState) (vgdb_interrupted_tid);
   else
      tst = VG_(get_ThreadState) (VG_(running_tid));
   wptid = tst->os_state.lwpid;
   /* We can only change the current_inferior when the wptid references
      an existing thread.  Otherwise, we are still in the init phase.
      (hack similar to main thread hack in valgrind_update_threads) */
   if (tst->os_state.lwpid)
      current_inferior = gdb_id_to_thread (wptid);
   stop_pc = (*the_low_target.get_pc) ();

   dlog(1,
        "exit valgrind_wait status T ptid %s stop_pc %s signal %u\n",
        image_ptid (wptid), sym (stop_pc), sig);
   return sig;
}

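/* Note: the status characters handed back to the caller mirror the GDB
   remote protocol stop replies: 'W' (process exited, with an exit code),
   'X' (process terminated by a signal) and 'T' (thread stopped, with a
   signal number). */
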
/* Fetch one register from valgrind VEX guest state. */
void valgrind_fetch_register (int regno, unsigned char *buf)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno < 0 || regno >= the_low_target.num_regs) {
      dlog(0, "error fetch_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }
   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      VG_(memset) (buf, 0, size); // registers not fetched will be seen as 0.
      (*the_low_target.transfer_register) (tid, regno, buf,
                                           valgrind_to_gdbserver, size, &mod);
      // Note: the *mod received from transfer_register is not interesting.
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, (char*) buf, size);
         dlog(3, "fetched register %d size %d name %s value %s tid %u status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
   }
}

/* Store register REGNO value back into the inferior VEX state. */
void valgrind_store_register (int regno, const unsigned char *buf)
{
   int size;
   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;

   if (regno < 0 || regno >= the_low_target.num_regs) {
      dlog(0, "error store_register regno %d max %d\n",
           regno, the_low_target.num_regs);
      return;
   }

   size = register_size (regno);
   if (size > 0) {
      Bool mod;
      Addr old_SP, new_SP;

      if (regno == the_low_target.stack_pointer_regno) {
         /* When the stack pointer register is changed such that the
            stack is extended, we had better inform the tool of the
            stack increase.  This is needed in particular to avoid
            spurious Memcheck errors during Inferior calls.  So, we
            save in old_SP the SP before the change.  A change of
            stack pointer is also assumed to have initialised this
            new stack space.  For the typical example of an inferior
            call, gdb writes arguments on the stack, and then
            changes the stack pointer.  As the stack increase tool
            function might mark it as undefined, we have to call it
            at the right moment. */
         VG_(memset) ((void *) &old_SP, 0, size);
         (*the_low_target.transfer_register) (tid, regno, (void *) &old_SP,
                                              valgrind_to_gdbserver, size, &mod);
      }

      char buf_copy[size];
      /* Copy buf to buf_copy to avoid warnings when passing a const to
         transfer_register.  This is ok as transfer_register called with
         gdbserver_to_valgrind will read from buf and write to the VEX
         state. */
      VG_(memcpy) (buf_copy, buf, size);

      (*the_low_target.transfer_register) (tid, regno, buf_copy,
                                           gdbserver_to_valgrind, size, &mod);
      if (mod && VG_(debugLog_getLevel)() > 1) {
         char bufimage [2*size + 1];
         heximage (bufimage, buf_copy, size);
         dlog(2,
              "stored register %d size %d name %s value %s "
              "tid %u status %s\n",
              regno, size, the_low_target.reg_defs[regno].name, bufimage,
              tid, VG_(name_of_ThreadStatus) (tst->status));
      }
      if (regno == the_low_target.stack_pointer_regno) {
         VG_(memcpy) (&new_SP, buf, size);
         if (old_SP > new_SP) {
            Word delta = (Word)new_SP - (Word)old_SP;
            dlog(1,
                 "   stack increase by stack pointer changed from %p to %p "
                 "delta %ld\n",
                 (void*) old_SP, (void *) new_SP,
                 delta);
            VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, 0 );
            VG_TRACK( new_mem_stack,       new_SP, -delta );
            VG_TRACK( post_mem_write, Vg_CoreClientReq, tid,
                      new_SP, -delta);
         }
      }
   }
}

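/* Note on the stack pointer handling above: the stack is assumed to grow
   downwards, so old_SP > new_SP means the stack was extended.  delta is
   then negative, and -delta is the length of the new area [new_SP,
   old_SP) that is reported to the tool as freshly allocated and
   initialised stack. */
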
Bool hostvisibility = False;
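/* Note: hostvisibility can be enabled on user request (via a gdbserver
   monitor command); when True, gdbserver may also access valgrind's own
   (host) memory, not just client memory, as checked below. */
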
int valgrind_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
   const void *sourceaddr = C2v (memaddr);
   dlog(3, "reading memory %p size %d\n", sourceaddr, len);
   if (VG_(am_is_valid_for_client) ((Addr) sourceaddr,
                                    len, VKI_PROT_READ)
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) sourceaddr,
                                             len, VKI_PROT_READ))) {
      VG_(memcpy) (myaddr, sourceaddr, len);
      return 0;
   } else {
      dlog(1, "error reading memory %p size %d\n", sourceaddr, len);
      return -1;
   }
}

int valgrind_write_memory (CORE_ADDR memaddr,
                           const unsigned char *myaddr, int len)
{
   Bool is_valid_client_memory;
   void *targetaddr = C2v (memaddr);
   dlog(3, "writing memory %p size %d\n", targetaddr, len);
   is_valid_client_memory
      = VG_(am_is_valid_for_client) ((Addr)targetaddr, len, VKI_PROT_WRITE);
   if (is_valid_client_memory
       || (hostvisibility
           && VG_(am_is_valid_for_valgrind) ((Addr) targetaddr,
                                             len, VKI_PROT_READ))) {
      if (len > 0) {
         VG_(memcpy) (targetaddr, myaddr, len);
         if (is_valid_client_memory && VG_(tdict).track_post_mem_write) {
            /* Inform the tool of the post memwrite.  Note that we do the
               minimum necessary to avoid complaints from e.g. memcheck.
               The idea is that the debugger should be as unintrusive as
               possible.  So, we do not inform of the pre mem write (and
               in any case, this would cause problems with memcheck,
               which does not accept our CorePart in pre_mem_write). */
            ThreadState *tst =
               (ThreadState *) inferior_target_data (current_inferior);
            ThreadId tid = tst->tid;
            VG_(tdict).track_post_mem_write( Vg_CoreClientReq, tid,
                                             (Addr) targetaddr, len );
         }
      }
      return 0;
   } else {
      dlog(1, "error writing memory %p size %d\n", targetaddr, len);
      return -1;
   }
}

/* insert or remove a breakpoint */
static
int valgrind_point (Bool insert, char type, CORE_ADDR addr, int len)
{
   PointKind kind;
   switch (type) {
   case '0': /* implemented by inserting checks at each instruction in sb */
      kind = software_breakpoint;
      break;
   case '1': /* hw breakpoint, same implementation as sw breakpoint */
      kind = hardware_breakpoint;
      break;
   case '2':
      kind = write_watchpoint;
      break;
   case '3':
      kind = read_watchpoint;
      break;
   case '4':
      kind = access_watchpoint;
      break;
   default:
      vg_assert (0);
   }

   /* Attention: gdbserver convention differs: 0 means ok; 1 means not ok */
   if (VG_(gdbserver_point) (kind, insert, addr, len))
      return 0;
   else
      return 1; /* error or unsupported */
}

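/* Note: the type characters above correspond to the kinds used by the
   GDB remote protocol 'Z'/'z' packets: Z0 software breakpoint, Z1
   hardware breakpoint, Z2 write watchpoint, Z3 read watchpoint, Z4
   access watchpoint. */
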
const char* valgrind_target_xml (Bool shadow_mode)
{
   return (*the_low_target.target_xml) (shadow_mode);
}

int valgrind_insert_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ True, type, addr, len);
}

int valgrind_remove_watchpoint (char type, CORE_ADDR addr, int len)
{
   return valgrind_point (/* insert */ False, type, addr, len);
}

/* Determines the (platform specific) offset of the lm_modid field in
   the link map struct.
   Stores the offset in *result and returns True if the offset can be
   determined.  Returns False otherwise; *result is then not to be
   used. */
static Bool getplatformoffset (SizeT *result)
{
   static Bool getplatformoffset_called = False;

   static Bool lm_modid_offset_found = False;
   static SizeT lm_modid_offset = 1u << 31; // Rubbish initial value.
   // lm_modid_offset is a magic offset, retrieved using an external program.

   if (!getplatformoffset_called) {
      getplatformoffset_called = True;
      const HChar *platform = VG_PLATFORM;
      const HChar *cmdformat = "%s/%s-%s -o %s";
      const HChar *getoff = "getoff";
      HChar outfile[VG_(mkstemp_fullname_bufsz) (VG_(strlen)(getoff))];
      Int fd = VG_(mkstemp) (getoff, outfile);
      if (fd == -1)
         return False;
      HChar cmd[ VG_(strlen)(cmdformat)
                 + VG_(strlen)(VG_(libdir)) - 2
                 + VG_(strlen)(getoff)      - 2
                 + VG_(strlen)(platform)    - 2
                 + VG_(strlen)(outfile)     - 2
                 + 1];
      UInt cmdlen;
      struct vg_stat stat_buf;
      Int ret;

      cmdlen = VG_(snprintf)(cmd, sizeof(cmd),
                             cmdformat,
                             VG_(libdir), getoff, platform, outfile);
      vg_assert (cmdlen == sizeof(cmd) - 1);
      ret = VG_(system) (cmd);
      if (ret != 0 || VG_(debugLog_getLevel)() >= 1)
         VG_(dmsg) ("command %s exit code %d\n", cmd, ret);
      ret = VG_(fstat)( fd, &stat_buf );
      if (ret != 0)
         VG_(dmsg) ("error VG_(fstat) %d %s\n", fd, outfile);
      else {
         HChar *w;
         HChar *ssaveptr;
         HChar *os;
         HChar *str;
         HChar *endptr;

         os = malloc (stat_buf.size+1);
         vg_assert (os);
         ret = VG_(read)(fd, os, stat_buf.size);
         vg_assert(ret == stat_buf.size);
         os[ret] = '\0';
         str = os;
         while ((w = VG_(strtok_r)(str, " \n", &ssaveptr)) != NULL) {
            if (VG_(strcmp) (w, "lm_modid_offset") == 0) {
               w = VG_(strtok_r)(NULL, " \n", &ssaveptr);
               lm_modid_offset = (SizeT) VG_(strtoull16) ( w, &endptr );
               if (endptr == w)
                  VG_(dmsg) ("%s lm_modid_offset unexpected hex value %s\n",
                             cmd, w);
               else
                  lm_modid_offset_found = True;
            } else {
               VG_(dmsg) ("%s produced unexpected %s\n", cmd, w);
            }
            str = NULL; // ensure next VG_(strtok_r) continues the parsing.
         }
         VG_(free) (os);
      }

      VG_(close)(fd);
      ret = VG_(unlink)( outfile );
      if (ret != 0)
         VG_(umsg) ("error: could not unlink %s\n", outfile);
   }

   *result = lm_modid_offset;
   return lm_modid_offset_found;
}

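/* Note: getoff-<platform> is a small helper program shipped in
   VG_(libdir).  Invoked as "getoff-<platform> -o <outfile>", it writes
   lines of the form "<name> <hex value>" (e.g. "lm_modid_offset 0x1e8",
   value illustrative only), which the parsing loop above scans for. */
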
Bool valgrind_get_tls_addr (ThreadState *tst,
                            CORE_ADDR offset,
                            CORE_ADDR lm,
                            CORE_ADDR *tls_addr)
{
   CORE_ADDR **dtv_loc;
   CORE_ADDR *dtv;
   SizeT lm_modid_offset;
   unsigned long int modid;

#define CHECK_DEREF(addr, len, name) \
   if (!VG_(am_is_valid_for_client) ((Addr)(addr), (len), VKI_PROT_READ)) { \
      dlog(0, "get_tls_addr: %s at %p len %lu not addressable\n",       \
           name, (void*)(addr), (unsigned long)(len));                  \
      return False;                                                     \
   }

   *tls_addr = 0;

   if (the_low_target.target_get_dtv == NULL) {
      dlog(1, "low level dtv support not available\n");
      return False;
   }

   if (!getplatformoffset (&lm_modid_offset)) {
      dlog(0, "link_map modid field offset not available\n");
      return False;
   }
   dlog (2, "link_map modid offset %p\n", (void*)lm_modid_offset);
   vg_assert (lm_modid_offset < 0x10000); // let's say

   dtv_loc = (*the_low_target.target_get_dtv)(tst);
   if (dtv_loc == NULL) {
      dlog(0, "low level dtv support returned NULL\n");
      return False;
   }

   CHECK_DEREF(dtv_loc, sizeof(CORE_ADDR), "dtv_loc");
   dtv = *dtv_loc;

   // Check we can read at least the two first addresses of dtv.
   CHECK_DEREF(dtv, 2*sizeof(CORE_ADDR), "dtv 2 first entries");
   dlog (2, "tid %u dtv %p\n", tst->tid, (void*)dtv);

   // Check we can read the modid
   CHECK_DEREF(lm+lm_modid_offset, sizeof(unsigned long int), "link_map modid");
   modid = *(unsigned long int *)(lm+lm_modid_offset);
   dlog (2, "tid %u modid %lu\n", tst->tid, modid);

   // Check we can access the dtv entry for modid
   CHECK_DEREF(dtv + 2 * modid, sizeof(CORE_ADDR), "dtv[2*modid]");

   // Compute the base address of the tls block.
   *tls_addr = *(dtv + 2 * modid);

   if (*tls_addr & 1) {
      /* This means that the computed address is not valid, most probably
         because the given module uses static TLS.
         However, the best we can do is to try to compute the address
         using static TLS.  This is what libthread_db does.
         Ref. GLIBC/nptl_db/td_thr_tlsbase.c:td_thr_tlsbase(). */
      CORE_ADDR tls_offset_addr;
      PtrdiffT tls_offset;

      dlog(2, "tls_addr (%p & 1) => computing tls_addr using static TLS\n",
           (void*) *tls_addr);

      /* Assumes that tls_offset is placed right before tls_modid.
         To check the assumption, start a gdb on none/tests/tls and do:
           p &((struct link_map*)0x0)->l_tls_modid
           p &((struct link_map*)0x0)->l_tls_offset
         Instead of assuming this, we could calculate this similarly to
         lm_modid_offset, by extending getplatformoffset to support
         querying more than one offset. */
      tls_offset_addr = lm + lm_modid_offset - sizeof(PtrdiffT);

      // Check we can read the tls_offset.
      CHECK_DEREF(tls_offset_addr, sizeof(PtrdiffT), "link_map tls_offset");
      tls_offset = *(PtrdiffT *)(tls_offset_addr);
      dlog(2, "tls_offset_addr %p tls_offset %ld\n",
           (void*)tls_offset_addr, (long)tls_offset);

      /* The following two values represent the platform dependent
         constants NO_TLS_OFFSET and FORCED_DYNAMIC_TLS_OFFSET,
         respectively. */
      if ((tls_offset == -1) || (tls_offset == -2)) {
         dlog(2, "link_map tls_offset is not valid for static TLS\n");
         return False;
      }

      // This calculation is also platform dependent.
#if defined(VGA_mips32) || defined(VGA_mips64)
      *tls_addr = ((CORE_ADDR)dtv_loc + 2 * sizeof(CORE_ADDR) + tls_offset);
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
      *tls_addr = ((CORE_ADDR)dtv_loc + sizeof(CORE_ADDR) + tls_offset);
#elif defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x)
      *tls_addr = (CORE_ADDR)dtv_loc - tls_offset - sizeof(CORE_ADDR);
#else
      // ppc32, arm, arm64
      dlog(0, "target.c is missing platform code for static TLS\n");
      return False;
#endif
   }

   // Finally, add the tls variable offset to the tls block base address.
   *tls_addr += offset;

   return True;
#undef CHECK_DEREF
}

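/* Note on the dtv layout assumed above: in glibc, each dtv entry is two
   words wide (a value pointer plus bookkeeping), so the entry for module
   modid sits at dtv + 2 * modid and its first word is the TLS block
   address.  An odd value means the block has not been allocated, which
   is what triggers the static TLS fallback. */
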
/* Returns a pointer to the architecture state corresponding to
   the provided register set: 0 => normal guest registers,
                              1 => shadow1
                              2 => shadow2 */
VexGuestArchState* get_arch (int set, ThreadState* tst)
{
   switch (set) {
   case 0: return &tst->arch.vex;
   case 1: return &tst->arch.vex_shadow1;
   case 2: return &tst->arch.vex_shadow2;
   default: vg_assert(0);
   }
}

static int non_shadow_num_regs = 0;
static struct reg *non_shadow_reg_defs = NULL;
void initialize_shadow_low(Bool shadow_mode)
{
   if (non_shadow_reg_defs == NULL) {
      non_shadow_reg_defs = the_low_target.reg_defs;
      non_shadow_num_regs = the_low_target.num_regs;
   }

   if (the_low_target.reg_defs != non_shadow_reg_defs) {
      free (the_low_target.reg_defs);
   }
   if (shadow_mode) {
      the_low_target.num_regs = 3 * non_shadow_num_regs;
      the_low_target.reg_defs = build_shadow_arch (non_shadow_reg_defs,
                                                   non_shadow_num_regs);
   } else {
      the_low_target.num_regs = non_shadow_num_regs;
      the_low_target.reg_defs = non_shadow_reg_defs;
   }
   set_register_cache (the_low_target.reg_defs, the_low_target.num_regs);
}

void set_desired_inferior (int use_general)
{
   struct thread_info *found;

   if (use_general == 1) {
      found = (struct thread_info *) find_inferior_id (&all_threads,
                                                       general_thread);
   } else {
      found = NULL;

      /* If we are continuing any (all) thread(s), use step_thread
         to decide which thread to step and/or send the specified
         signal to. */
      if ((step_thread != 0 && step_thread != -1)
          && (cont_thread == 0 || cont_thread == -1))
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          step_thread);

      if (found == NULL)
         found = (struct thread_info *) find_inferior_id (&all_threads,
                                                          cont_thread);
   }

   if (found == NULL)
      current_inferior = (struct thread_info *) all_threads.head;
   else
      current_inferior = found;

   ThreadState *tst = (ThreadState *) inferior_target_data (current_inferior);
   ThreadId tid = tst->tid;
   dlog(1, "set_desired_inferior use_general %d found %p tid %u lwpid %d\n",
        use_general, found, tid, tst->os_state.lwpid);
}

void* VG_(dmemcpy) ( void *d, const void *s, SizeT sz, Bool *mod )
{
   if (VG_(memcmp) (d, s, sz)) {
      *mod = True;
      return VG_(memcpy) (d, s, sz);
   } else {
      *mod = False;
      return d;
   }
}

void VG_(transfer) (void *valgrind,
                    void *gdbserver,
                    transfer_direction dir,
                    SizeT sz,
                    Bool *mod)
{
   if (dir == valgrind_to_gdbserver)
      VG_(dmemcpy) (gdbserver, valgrind, sz, mod);
   else if (dir == gdbserver_to_valgrind)
      VG_(dmemcpy) (valgrind, gdbserver, sz, mod);
   else
      vg_assert (0);
}

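/* Sketch of how VG_(transfer) is typically used by an architecture's
   transfer_register implementation (illustrative, amd64-like names):
      case 0: VG_(transfer) (&vex->guest_RAX, buf, dir, size, mod); break;
   i.e. each register number maps to the address of the corresponding
   field in the VEX guest state, and dir selects the copy direction. */
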
void valgrind_initialize_target(void)
{
#if defined(VGA_x86)
   x86_init_architecture(&the_low_target);
#elif defined(VGA_amd64)
   amd64_init_architecture(&the_low_target);
#elif defined(VGA_arm)
   arm_init_architecture(&the_low_target);
#elif defined(VGA_arm64)
   arm64_init_architecture(&the_low_target);
#elif defined(VGA_ppc32)
   ppc32_init_architecture(&the_low_target);
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
   ppc64_init_architecture(&the_low_target);
#elif defined(VGA_s390x)
   s390x_init_architecture(&the_low_target);
#elif defined(VGA_mips32)
   mips32_init_architecture(&the_low_target);
#elif defined(VGA_mips64)
   mips64_init_architecture(&the_low_target);
#elif defined(VGA_nanomips)
   nanomips_init_architecture(&the_low_target);
#else
#error "architecture missing in target.c valgrind_initialize_target"
#endif
}