[valgrind.git] / coregrind / m_execontext.c
/*--------------------------------------------------------------------*/
/*--- Store and compare stack backtraces          m_execontext.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"     // For VG_(message)()
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_stacktrace.h"
#include "pub_core_machine.h"       // VG_(get_IP)
#include "pub_core_threadstate.h"   // VG_(is_valid_tid)
#include "pub_core_execontext.h"    // self
/*------------------------------------------------------------*/
/*--- Low-level ExeContext storage.                        ---*/
/*------------------------------------------------------------*/

/* Depending on VgRes, the first 2, 4 or all IP values are used in
   comparisons to remove duplicate errors, and for comparing against
   suppression specifications.  If not used in comparison, the rest
   are purely informational (but often important).

   The contexts are stored in a traditional chained hash table, so as
   to allow quick determination of whether a new context already
   exists.  The hash table starts small and expands dynamically, so
   as to keep the load factor below 1.0.

   The idea is only to ever store any one context once, so as to save
   space and make exact comparisons faster. */
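
/* A minimal sketch of what the store-once property buys clients: since a
   given (ips, epoch) combination is stored at most once, exact comparison
   of two contexts needs no per-frame work.  Illustrative only; the real
   check is the Vg_HighRes case of VG_(eq_ExeContext) below, which also
   handles NULL arguments.

      Bool exactly_equal ( const ExeContext* e1, const ExeContext* e2 )
      {
         return e1 == e2;   // pointer equality suffices
      }
*/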
/* Primes for the hash table */

#define N_EC_PRIMES 18

static SizeT ec_primes[N_EC_PRIMES] = {
         769UL,         1543UL,         3079UL,          6151UL,
       12289UL,        24593UL,        49157UL,         98317UL,
      196613UL,       393241UL,       786433UL,       1572869UL,
     3145739UL,      6291469UL,     12582917UL,      25165843UL,
    50331653UL,    100663319UL
};
/* Each element is present in a hash chain, and also contains a
   variable-length array of guest code addresses (the useful part). */

struct _ExeContext {
   struct _ExeContext* chain;
   /* A 32-bit unsigned integer that uniquely identifies this
      ExeContext.  Memcheck uses these for origin tracking.  Values
      must be nonzero (else Memcheck's origin tracking is hosed), must
      be a multiple of four, and must be unique.  Hence they start at
      4. */
   UInt ecu;
   /* Epoch in which the ExeContext can be symbolised.  In other words,
      epoch identifies the set of debug info to use to symbolise the
      Addrs in ips, using e.g. VG_(get_filename), VG_(get_fnname), ...
      Note 1: two ExeContexts are equal when their ips arrays and their
      epochs are equal.
      Note 2: a freshly created ExeContext has a DiEpoch_INVALID epoch.
      DiEpoch_INVALID is used as a special value to indicate that the
      ExeContext is valid in the current epoch.  VG_(get_ExeContext_epoch)
      translates this invalid value into the real current epoch.
      When a debug info is archived, the set of ExeContexts is scanned:
      if an ExeContext with epoch == DiEpoch_INVALID has one or more
      ips Addrs corresponding to the just-archived debug info, the
      ExeContext epoch is changed to the last epoch identifying the set
      containing the archived debug info. */
   DiEpoch epoch;
   /* Variable-length array.  The size is 'n_ips'; at least 1, at most
      VG_DEEPEST_BACKTRACE.  [0] is the current IP, [1] is its caller,
      [2] is the caller of [1], etc. */
   UInt n_ips;
   Addr ips[0];
};
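
/* Layout/allocation sketch (illustrative, mirroring record_ExeContext_wrk2
   below): an ExeContext is a single permanently-allocated block whose
   trailing ips[] array is sized to the trace it holds, and whose ecu is
   the next multiple of four, starting at 4.

      ExeContext* ec = VG_(perm_malloc)( sizeof(struct _ExeContext)
                                            + n_ips * sizeof(Addr),
                                         vg_alignof(struct _ExeContext) );
      ec->ecu = ec_next_ecu;   // 4, 8, 12, ...; never zero
      ec_next_ecu += 4;
*/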
/* This is the dynamically expanding hash table. */
static ExeContext** ec_htab;          /* array [ec_htab_size] of ExeContext* */
static SizeT        ec_htab_size;     /* one of the values in ec_primes */
static SizeT        ec_htab_size_idx; /* 0 .. N_EC_PRIMES-1 */

/* ECU serial number */
static UInt ec_next_ecu = 4;  /* We must never issue zero */

static ExeContext* null_ExeContext;

/* Stats only: the number of times the system was searched to locate a
   context. */
static ULong ec_searchreqs;

/* Stats only: the number of full context comparisons done. */
static ULong ec_searchcmps;

/* Stats only: total number of stored contexts. */
static ULong ec_totstored;

/* Number of 2, 4 and (fast) full cmps done. */
static ULong ec_cmp2s;
static ULong ec_cmp4s;
static ULong ec_cmpAlls;
/*------------------------------------------------------------*/
/*--- ExeContext functions.                                ---*/
/*------------------------------------------------------------*/

static ExeContext* record_ExeContext_wrk2 ( const Addr* ips, UInt n_ips );
/* Initialise this subsystem. */
static void init_ExeContext_storage ( void )
{
   Int i;
   static Bool init_done = False;
   if (LIKELY(init_done))
      return;
   ec_searchreqs = 0;
   ec_searchcmps = 0;
   ec_totstored  = 0;
   ec_cmp2s = 0;
   ec_cmp4s = 0;
   ec_cmpAlls = 0;

   ec_htab_size_idx = 0;
   ec_htab_size = ec_primes[ec_htab_size_idx];
   ec_htab = VG_(malloc)("execontext.iEs1",
                         sizeof(ExeContext*) * ec_htab_size);
   for (i = 0; i < ec_htab_size; i++)
      ec_htab[i] = NULL;

   {
      Addr ips[1];
      ips[0] = 0;
      null_ExeContext = record_ExeContext_wrk2(ips, 1);
      // null execontext must be the first one created and get ecu 4.
      vg_assert(null_ExeContext->ecu == 4);
   }

   init_done = True;
}
DiEpoch VG_(get_ExeContext_epoch)( const ExeContext* e )
{
   if (is_DiEpoch_INVALID (e->epoch))
      return VG_(current_DiEpoch)();
   else
      return e->epoch;
}
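
/* Usage sketch: any symbolisation of ec->ips should go through
   VG_(get_ExeContext_epoch) so that archived debug info is still found.
   Illustrative only -- the exact VG_(get_fnname) signature is assumed
   here, not taken from this file.

      const HChar* name;
      if (VG_(get_fnname)( VG_(get_ExeContext_epoch)(ec), ec->ips[0], &name ))
         VG_(message)(Vg_DebugMsg, "top frame: %s\n", name);
*/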
/* Print stats. */
void VG_(print_ExeContext_stats) ( Bool with_stacktraces )
{
   Int i;
   ULong total_n_ips;
   ExeContext* ec;

   init_ExeContext_storage();

   if (with_stacktraces) {
      VG_(message)(Vg_DebugMsg, "   exectx: Printing contexts stacktraces\n");
      for (i = 0; i < ec_htab_size; i++) {
         for (ec = ec_htab[i]; ec; ec = ec->chain) {
            VG_(message)(Vg_DebugMsg,
                         "   exectx: stacktrace ecu %u epoch %u n_ips %u\n",
                         ec->ecu, ec->epoch.n, ec->n_ips);
            VG_(pp_StackTrace)( VG_(get_ExeContext_epoch)(ec),
                                ec->ips, ec->n_ips );
         }
      }
      VG_(message)(Vg_DebugMsg,
                   "   exectx: Printed %'llu contexts stacktraces\n",
                   ec_totstored);
   }

   total_n_ips = 0;
   for (i = 0; i < ec_htab_size; i++) {
      for (ec = ec_htab[i]; ec; ec = ec->chain)
         total_n_ips += ec->n_ips;
   }
   VG_(message)(Vg_DebugMsg,
                "   exectx: %'lu lists, %'llu contexts (avg %3.2f per list)"
                " (avg %3.2f IP per context)\n",
                ec_htab_size, ec_totstored,
                (Double)ec_totstored / (Double)ec_htab_size,
                (Double)total_n_ips / (Double)ec_totstored );
   VG_(message)(Vg_DebugMsg,
                "   exectx: %'llu searches, %'llu full compares (%'llu per 1000)\n",
                ec_searchreqs, ec_searchcmps,
                ec_searchreqs == 0
                   ? 0ULL
                   : ( (ec_searchcmps * 1000ULL) / ec_searchreqs ) );
   VG_(message)(Vg_DebugMsg,
                "   exectx: %'llu cmp2, %'llu cmp4, %'llu cmpAll\n",
                ec_cmp2s, ec_cmp4s, ec_cmpAlls );
}
/* Print an ExeContext. */
void VG_(pp_ExeContext) ( ExeContext* ec )
{
   VG_(pp_StackTrace)( VG_(get_ExeContext_epoch)(ec), ec->ips, ec->n_ips );
}

void VG_(apply_ExeContext)(
        void(*action)(UInt n, DiEpoch ep, Addr ip, void* opaque),
        void* opaque, ExeContext* ec)
{
   VG_(apply_StackTrace)(action, opaque, VG_(get_ExeContext_epoch)(ec),
                         ec->ips, ec->n_ips);
}
void VG_(archive_ExeContext_in_range) (DiEpoch last_epoch,
                                       Addr text_avma, SizeT length )
{
   Int i;
   ExeContext* ec;
   ULong n_archived = 0;
   const Addr text_avma_end = text_avma + length - 1;

   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_DebugMsg, "Scanning and archiving ExeContexts ...\n");
   for (i = 0; i < ec_htab_size; i++) {
      for (ec = ec_htab[i]; ec; ec = ec->chain) {
         if (is_DiEpoch_INVALID (ec->epoch))
            for (UInt j = 0; j < ec->n_ips; j++) {
               if (UNLIKELY(ec->ips[j] >= text_avma
                            && ec->ips[j] <= text_avma_end)) {
                  ec->epoch = last_epoch;
                  n_archived++;
                  break;
               }
            }
      }
   }
   if (VG_(clo_verbosity) > 1)
      VG_(message)(Vg_DebugMsg,
                   "Scanned %'llu ExeContexts, archived %'llu ExeContexts\n",
                   ec_totstored, n_archived);
}
/* Compare two ExeContexts.  Number of callers considered depends on res. */
Bool VG_(eq_ExeContext) ( VgRes res, const ExeContext* e1,
                          const ExeContext* e2 )
{
   Int i;

   if (e1 == NULL || e2 == NULL)
      return False;

   // Must be at least one address in each trace.
   vg_assert(e1->n_ips >= 1 && e2->n_ips >= 1);

   // Note: we compare the epoch in the cases below, and not here,
   // so as to keep the ec_cmp* stats correct.

   switch (res) {
   case Vg_LowRes:
      /* Just compare the top two callers. */
      ec_cmp2s++;
      for (i = 0; i < 2; i++) {
         if ( (e1->n_ips <= i) &&  (e2->n_ips <= i)) return True;
         if ( (e1->n_ips <= i) && !(e2->n_ips <= i)) return False;
         if (!(e1->n_ips <= i) &&  (e2->n_ips <= i)) return False;
         if (e1->ips[i] != e2->ips[i])               return False;
      }
      return e1->epoch.n == e2->epoch.n;

   case Vg_MedRes:
      /* Just compare the top four callers. */
      ec_cmp4s++;
      for (i = 0; i < 4; i++) {
         if ( (e1->n_ips <= i) &&  (e2->n_ips <= i)) return True;
         if ( (e1->n_ips <= i) && !(e2->n_ips <= i)) return False;
         if (!(e1->n_ips <= i) &&  (e2->n_ips <= i)) return False;
         if (e1->ips[i] != e2->ips[i])               return False;
      }
      return e1->epoch.n == e2->epoch.n;

   case Vg_HighRes:
      ec_cmpAlls++;
      /* Compare them all -- just do pointer comparison. */
      if (e1 != e2) return False;
      return True;

   default:
      VG_(core_panic)("VG_(eq_ExeContext): unrecognised VgRes");
   }
}
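
/* Resolution summary (illustrative restatement of the cases above):

      Vg_LowRes   -> compare the top 2 IPs, then the epoch
      Vg_MedRes   -> compare the top 4 IPs, then the epoch
      Vg_HighRes  -> whole trace, i.e. pointer equality, since any given
                     trace is stored exactly once
*/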
/* VG_(record_ExeContext) is the head honcho here.  Take a snapshot of
   the client's stack.  Search our collection of ExeContexts to see if
   we already have it, and if not, allocate a new one.  Either way,
   return a pointer to the context.  If there is a matching context we
   guarantee to not allocate a new one.  Thus we never store
   duplicates, and so exact equality can be quickly done as equality
   on the returned ExeContext* values themselves.  Inspired by Hugs's
   Text type.

   Also checks whether the hash table needs expanding, and expands it
   if so. */
static inline UWord ROLW ( UWord w, Int n )
{
   Int bpw = 8 * sizeof(UWord);
   w = (w << n) | (w >> (bpw-n));
   return w;
}
static UWord calc_hash ( const Addr* ips, UInt n_ips, UWord htab_sz )
{
   UInt  i;
   UWord hash = 0;
   vg_assert(htab_sz > 0);
   for (i = 0; i < n_ips; i++) {
      hash ^= ips[i];
      hash = ROLW(hash, 19);
   }
   return hash % htab_sz;
}
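
/* Worked expansion (illustrative): for a two-entry trace {A, B} the loop
   above computes

      hash = ROLW( ROLW(A, 19) ^ B, 19 ) % htab_sz

   while {B, A} gives ROLW( ROLW(B, 19) ^ A, 19 ) % htab_sz, so the hash
   depends on frame order as well as on the IP values themselves. */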
static void resize_ec_htab ( void )
{
   SizeT        i;
   SizeT        new_size;
   ExeContext** new_ec_htab;

   vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
   if (ec_htab_size_idx == N_EC_PRIMES-1)
      return; /* out of primes - can't resize further */

   new_size = ec_primes[ec_htab_size_idx + 1];
   new_ec_htab = VG_(malloc)("execontext.reh1",
                             sizeof(ExeContext*) * new_size);

   VG_(debugLog)(
      1, "execontext",
      "resizing htab from size %lu to %lu (idx %lu)  Total#ECs=%llu\n",
      ec_htab_size, new_size, ec_htab_size_idx + 1, ec_totstored);

   for (i = 0; i < new_size; i++)
      new_ec_htab[i] = NULL;

   for (i = 0; i < ec_htab_size; i++) {
      ExeContext* cur = ec_htab[i];
      while (cur) {
         ExeContext* next = cur->chain;
         UWord hash = calc_hash(cur->ips, cur->n_ips, new_size);
         vg_assert(hash < new_size);
         cur->chain = new_ec_htab[hash];
         new_ec_htab[hash] = cur;
         cur = next;
      }
   }

   VG_(free)(ec_htab);
   ec_htab = new_ec_htab;
   ec_htab_size = new_size;
   ec_htab_size_idx++;
}
/* Used by the outer as a marker to separate the frames of the inner
   valgrind from the frames of the inner guest. */
static void _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______ (void)
{
}
/* Do the first part of getting a stack trace: actually unwind the
   stack, and hand the results off to the duplicate-trace-finder
   (_wrk2). */
static ExeContext* record_ExeContext_wrk ( ThreadId tid, Word first_ip_delta,
                                           Bool first_ip_only )
{
   Addr ips[VG_(clo_backtrace_size)];
   UInt n_ips;

   init_ExeContext_storage();

   vg_assert(sizeof(void*) == sizeof(UWord));
   vg_assert(sizeof(void*) == sizeof(Addr));

   vg_assert(VG_(is_valid_tid)(tid));

   if (first_ip_only) {
      n_ips = 1;
      ips[0] = VG_(get_IP)(tid) + first_ip_delta;
   } else {
      n_ips = VG_(get_StackTrace)( tid, ips, VG_(clo_backtrace_size),
                                   NULL/*array to dump SP values in*/,
                                   NULL/*array to dump FP values in*/,
                                   first_ip_delta );
      if (VG_(inner_threads) != NULL
          && n_ips + 1 < VG_(clo_backtrace_size)) {
         /* An inner V has informed us (the outer) of its thread array.
            Append the inner guest stack trace, if we still have some
            room in the ips array for the separator and (some) inner
            guest IPs. */
         UInt inner_tid;

         for (inner_tid = 1; inner_tid < VG_N_THREADS; inner_tid++) {
            if (VG_(threads)[tid].os_state.lwpid
                == VG_(inner_threads)[inner_tid].os_state.lwpid) {
               ThreadState* save_outer_vg_threads = VG_(threads);
               UInt n_ips_inner_guest;

               /* Append the separator + the inner guest stack trace. */
               ips[n_ips] = (Addr)
                  _______VVVVVVVV_appended_inner_guest_stack_VVVVVVVV_______;
               n_ips++;
               VG_(threads) = VG_(inner_threads);
               n_ips_inner_guest
                  = VG_(get_StackTrace)( inner_tid,
                                         ips + n_ips,
                                         VG_(clo_backtrace_size) - n_ips,
                                         NULL/*array to dump SP values in*/,
                                         NULL/*array to dump FP values in*/,
                                         first_ip_delta );
               n_ips += n_ips_inner_guest;
               VG_(threads) = save_outer_vg_threads;
               break;
            }
         }
      }
   }

   return record_ExeContext_wrk2 ( ips, n_ips );
}
/* Do the second part of getting a stack trace: ips[0 .. n_ips-1]
   holds a proposed trace.  Find or allocate a suitable ExeContext.
   Note that callers must have done init_ExeContext_storage() before
   getting to this point. */
static ExeContext* record_ExeContext_wrk2 ( const Addr* ips, UInt n_ips )
{
   Int         i;
   Bool        same;
   UWord       hash;
   ExeContext* new_ec;
   ExeContext* list;
   ExeContext  *prev2, *prev;

   static UInt ctr = 0;

   vg_assert(n_ips >= 1 && n_ips <= VG_(clo_backtrace_size));

   /* Now figure out if we've seen this one before.  First hash it so
      as to determine the list number. */
   hash = calc_hash( ips, n_ips, ec_htab_size );

   /* And (the expensive bit) look for a matching entry in the list. */

   ec_searchreqs++;

   prev2 = NULL;
   prev  = NULL;
   list  = ec_htab[hash];

   while (True) {
      if (list == NULL) break;
      ec_searchcmps++;
      same = list->n_ips == n_ips && is_DiEpoch_INVALID (list->epoch);
      for (i = 0; i < n_ips && same ; i++) {
         same = list->ips[i] == ips[i];
      }
      if (same) break;
      prev2 = prev;
      prev  = list;
      list  = list->chain;
   }

   if (list != NULL) {
      /* Yay!  We found it.  Once every 8 searches, move it one step
         closer to the start of the list to make future searches
         cheaper. */
      if (0 == ((ctr++) & 7)) {
         if (prev2 != NULL && prev != NULL) {
            /* Found at 3rd or later position in the chain. */
            vg_assert(prev2->chain == prev);
            vg_assert(prev->chain  == list);
            prev2->chain = list;
            prev->chain  = list->chain;
            list->chain  = prev;
         }
         else if (prev2 == NULL && prev != NULL) {
            /* Found at 2nd position in the chain. */
            vg_assert(ec_htab[hash] == prev);
            vg_assert(prev->chain == list);
            prev->chain = list->chain;
            list->chain = prev;
            ec_htab[hash] = list;
         }
      }
      return list;
   }

   /* Bummer.  We have to allocate a new context record. */
   ec_totstored++;

   new_ec = VG_(perm_malloc)( sizeof(struct _ExeContext)
                              + n_ips * sizeof(Addr),
                              vg_alignof(struct _ExeContext));

   for (i = 0; i < n_ips; i++)
      new_ec->ips[i] = ips[i];

   vg_assert(VG_(is_plausible_ECU)(ec_next_ecu));
   new_ec->ecu = ec_next_ecu;
   ec_next_ecu += 4;
   if (ec_next_ecu == 0) {
      /* Urr.  Now we're hosed; we emitted 2^30 ExeContexts already
         and have run out of numbers.  Not sure what to do. */
      VG_(core_panic)("m_execontext: more than 2^30 ExeContexts created");
   }

   new_ec->n_ips = n_ips;
   new_ec->chain = ec_htab[hash];
   new_ec->epoch = DiEpoch_INVALID();
   ec_htab[hash] = new_ec;

   /* Resize the hash table, maybe? */
   if ( ((ULong)ec_totstored) > ((ULong)ec_htab_size) ) {
      vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
      if (ec_htab_size_idx < N_EC_PRIMES-1)
         resize_ec_htab();
   }

   return new_ec;
}
ExeContext* VG_(record_ExeContext)( ThreadId tid, Word first_ip_delta ) {
   return record_ExeContext_wrk( tid, first_ip_delta,
                                 False/*!first_ip_only*/ );
}

ExeContext* VG_(record_depth_1_ExeContext)( ThreadId tid, Word first_ip_delta )
{
   return record_ExeContext_wrk( tid, first_ip_delta,
                                 True/*first_ip_only*/ );
}

ExeContext* VG_(make_depth_1_ExeContext_from_Addr)( Addr a ) {
   init_ExeContext_storage();
   return record_ExeContext_wrk2( &a, 1 );
}

StackTrace VG_(get_ExeContext_StackTrace) ( ExeContext* e ) {
   return e->ips;
}

UInt VG_(get_ECU_from_ExeContext)( const ExeContext* e ) {
   vg_assert(VG_(is_plausible_ECU)(e->ecu));
   return e->ecu;
}

Int VG_(get_ExeContext_n_ips)( const ExeContext* e ) {
   vg_assert(e->n_ips >= 1);
   return e->n_ips;
}

ExeContext* VG_(get_ExeContext_from_ECU)( UInt ecu )
{
   UWord i;
   ExeContext* ec;
   vg_assert(VG_(is_plausible_ECU)(ecu));
   vg_assert(ec_htab_size > 0);
   for (i = 0; i < ec_htab_size; i++) {
      for (ec = ec_htab[i]; ec; ec = ec->chain) {
         if (ec->ecu == ecu)
            return ec;
      }
   }
   return NULL;
}

ExeContext* VG_(make_ExeContext_from_StackTrace)( const Addr* ips, UInt n_ips )
{
   init_ExeContext_storage();
   return record_ExeContext_wrk2(ips, n_ips);
}

ExeContext* VG_(null_ExeContext) (void)
{
   init_ExeContext_storage();
   return null_ExeContext;
}
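
/* Usage sketch (illustrative; "interesting_event" and "prev" are
   hypothetical names, not part of this module): a typical tool records a
   context at an interesting event and later either prints it or compares
   it against a previously recorded one, using only the functions defined
   above.

      void interesting_event ( ThreadId tid, ExeContext** prev )
      {
         ExeContext* here = VG_(record_ExeContext)( tid, 0 );
         if (*prev && VG_(eq_ExeContext)( Vg_MedRes, here, *prev ))
            return;                  // same place as last time; ignore
         VG_(pp_ExeContext)( here ); // show where we are
         *prev = here;
      }
*/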
/*--------------------------------------------------------------------*/
/*--- end                                          m_execontext.c ---*/
/*--------------------------------------------------------------------*/