Update Linux x86 system call number definitions
[valgrind.git] / memcheck / mc_errors.c
blobe067686a402f7e69b2da16b2062fecfa704cceac
2 /*--------------------------------------------------------------------*/
3 /*--- Management, printing, etc, of errors and suppressions. ---*/
4 /*--- mc_errors.c ---*/
5 /*--------------------------------------------------------------------*/
7 /*
8 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors.
11 Copyright (C) 2000-2017 Julian Seward
12 jseward@acm.org
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, see <http://www.gnu.org/licenses/>.
27 The GNU General Public License is contained in the file COPYING.
30 #include "pub_tool_basics.h"
31 #include "pub_tool_gdbserver.h"
32 #include "pub_tool_poolalloc.h" // For mc_include.h
33 #include "pub_tool_hashtable.h" // For mc_include.h
34 #include "pub_tool_libcbase.h"
35 #include "pub_tool_libcassert.h"
36 #include "pub_tool_libcprint.h"
37 #include "pub_tool_machine.h"
38 #include "pub_tool_mallocfree.h"
39 #include "pub_tool_options.h"
40 #include "pub_tool_replacemalloc.h"
41 #include "pub_tool_tooliface.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset)
44 #include "pub_tool_xarray.h"
45 #include "pub_tool_aspacemgr.h"
46 #include "pub_tool_addrinfo.h"
48 #include "mc_include.h"
51 /*------------------------------------------------------------*/
52 /*--- Error types ---*/
53 /*------------------------------------------------------------*/
55 /* See comment in mc_include.h */
56 Bool MC_(any_value_errors) = False;
59 /* ------------------ Errors ----------------------- */
61 /* What kind of error it is. */
/* What kind of error it is.  Note that the tag is stored in the core
   Error structure (retrievable with VG_(get_error_kind)), not inside
   MC_Error itself -- see the comment in struct _MC_Error. */
typedef
   enum {
      Err_Value,          // use of an uninitialised value (load/store/jump)
      Err_Cond,           // conditional jump/move on uninitialised value(s)
      Err_CoreMem,        // unaddressable bytes in a core (signal) operation
      Err_Addr,           // load/store of an unaddressable location
      Err_Jump,           // jump to an unaddressable location
      Err_RegParam,       // syscall register param with undefined bytes
      Err_MemParam,       // syscall memory param undefined/unaddressable
      Err_User,           // client-request (CHECK_MEM_*) failure
      Err_Free,           // free() of a non-heap-block (incl. double free)
      Err_FreeMismatch,   // alloc/dealloc function family mismatch
      Err_Overlap,        // overlapping src/dst in strcpy/memcpy etc.
      Err_Leak,           // memory leak (one loss record)
      Err_IllegalMempool, // bad memory pool address
      Err_FishyValue,     // suspicious (negative-when-signed) SizeT argument
   }
   MC_ErrorTag;
typedef struct _MC_Error MC_Error;

/* Tool-specific per-error payload.  Exactly one union member is valid,
   selected by the MC_ErrorTag stored in the enclosing core Error. */
struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
   //MC_ErrorTag tag;

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      // - as a jump target
      struct {
         SizeT szB;   // size of value in bytes
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      // NB: isAddrErr is deliberately the first member here and in User,
      // so the two variants alias -- MC_(eq_Error) relies on this.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees). */
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with not the matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr  src;   // Source block
         Addr  dst;   // Destination block
         SizeT szB;   // Size in bytes; 0 if unused.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lr;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

      // A fishy function argument value
      // An argument value is considered fishy if the corresponding
      // parameter has SizeT type and the value when interpreted as a
      // signed number is negative.
      struct {
         const HChar *function_name;
         const HChar *argument_name;
         SizeT value;
      } FishyValue;
   } Err;
};
194 /*------------------------------------------------------------*/
195 /*--- Printing errors ---*/
196 /*------------------------------------------------------------*/
/* This is the "this error is due to be printed shortly; so have a
   look at it any print any preamble you want" function.  Which, in
   Memcheck, we don't use.  Hence a no-op.
*/
void MC_(before_pp_Error) ( const Error* err ) {
}
205 /* Do a printf-style operation on either the XML or normal output
206 channel, depending on the setting of VG_(clo_xml).
208 static void emit_WRK ( const HChar* format, va_list vargs )
210 if (VG_(clo_xml)) {
211 VG_(vprintf_xml)(format, vargs);
212 } else {
213 VG_(vmessage)(Vg_UserMsg, format, vargs);
/* printf-style wrapper around emit_WRK: routes the formatted message
   to the XML or normal channel as appropriate. */
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
226 static const HChar* str_leak_lossmode ( Reachedness lossmode )
228 const HChar *loss = "?";
229 switch (lossmode) {
230 case Unreached: loss = "definitely lost"; break;
231 case IndirectLeak: loss = "indirectly lost"; break;
232 case Possible: loss = "possibly lost"; break;
233 case Reachable: loss = "still reachable"; break;
235 return loss;
238 static const HChar* xml_leak_kind ( Reachedness lossmode )
240 const HChar *loss = "?";
241 switch (lossmode) {
242 case Unreached: loss = "Leak_DefinitelyLost"; break;
243 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
244 case Possible: loss = "Leak_PossiblyLost"; break;
245 case Reachable: loss = "Leak_StillReachable"; break;
247 return loss;
250 const HChar* MC_(parse_leak_kinds_tokens) =
251 "reachable,possible,indirect,definite";
/* Return the bitset containing every Reachedness value.  Computed
   lazily on first call by parsing the "all" keyword against
   MC_(parse_leak_kinds_tokens), then cached in a function-static. */
UInt MC_(all_Reachedness)(void)
{
   static UInt all;

   if (all == 0) {
      // Compute a set with all values by doing a parsing of the "all" keyword.
      Bool parseok = VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                         True,/*allow_all*/
                                         "all",
                                         &all);
      tl_assert (parseok && all);
   }

   return all;
}
269 static const HChar* pp_Reachedness_for_leak_kinds(Reachedness r)
271 switch(r) {
272 case Reachable: return "reachable";
273 case Possible: return "possible";
274 case IndirectLeak: return "indirect";
275 case Unreached: return "definite";
276 default: tl_assert(0);
/* Print the origin of an uninitialised value: what kind of allocation
   created it (okind, one of the MC_OKIND_* values) plus the execution
   context 'ec' where that happened.  Output goes to the XML or normal
   channel per VG_(clo_xml). */
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   const HChar* src = NULL;
   tl_assert(ec);

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
            src);
      VG_(pp_ExeContext)( ec );
   } else {
      emit( " Uninitialised value was created%s\n", src);
      VG_(pp_ExeContext)( ec );
   }
}
303 HChar * MC_(snprintf_delta) (HChar * buf, Int size,
304 SizeT current_val, SizeT old_val,
305 LeakCheckDeltaMode delta_mode)
307 // Make sure the buffer size is large enough. With old_val == 0 and
308 // current_val == ULLONG_MAX the delta including inserted commas is:
309 // 18,446,744,073,709,551,615
310 // whose length is 26. Therefore:
311 tl_assert(size >= 26 + 4 + 1);
313 if (delta_mode == LCD_Any)
314 buf[0] = '\0';
315 else if (current_val >= old_val)
316 VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
317 else
318 VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);
320 return buf;
/* Print one loss record 'lr' (record n_this_record of n_total_records),
   in XML or plain-text form.  The "(+N)/(-N)" delta annotations are
   emitted only when the last leak search ran with a delta mode other
   than LCD_Any (they format to empty strings otherwise). */
static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
                          LossRecord* lr, Bool xml)
{
   // char arrays to produce the indication of increase/decrease in case
   // of delta_mode != LCD_Any
   HChar d_bytes[31];
   HChar d_direct_bytes[31];
   HChar d_indirect_bytes[31];
   HChar d_num_blocks[31];

   MC_(snprintf_delta) (d_bytes, sizeof(d_bytes),
                        lr->szB + lr->indirect_szB,
                        lr->old_szB + lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_direct_bytes, sizeof(d_direct_bytes),
                        lr->szB,
                        lr->old_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_indirect_bytes, sizeof(d_indirect_bytes),
                        lr->indirect_szB,
                        lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_num_blocks, sizeof(d_num_blocks),
                        (SizeT) lr->num_blocks,
                        (SizeT) lr->old_num_blocks,
                        MC_(detect_memory_leaks_last_delta_mode));

   if (xml) {
      emit(" <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
      // Mention indirect losses only when there are any.
      if (lr->indirect_szB > 0) {
         emit( " <xwhat>\n" );
         emit( " <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
               "in %'u%s blocks"
               " are %s in loss record %'u of %'u</text>\n",
               lr->szB + lr->indirect_szB, d_bytes,
               lr->szB, d_direct_bytes,
               lr->indirect_szB, d_indirect_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               n_this_record, n_total_records );
         // Nb: don't put commas in these XML numbers
         emit( " <leakedbytes>%lu</leakedbytes>\n",
               lr->szB + lr->indirect_szB );
         emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
         emit( " </xwhat>\n" );
      } else {
         emit( " <xwhat>\n" );
         emit( " <text>%'lu%s bytes in %'u%s blocks"
               " are %s in loss record %'u of %'u</text>\n",
               lr->szB, d_direct_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               n_this_record, n_total_records );
         emit( " <leakedbytes>%lu</leakedbytes>\n", lr->szB);
         emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
         emit( " </xwhat>\n" );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } else { /* ! if (xml) */
      if (lr->indirect_szB > 0) {
         emit(
            "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
            " are %s in loss record %'u of %'u\n",
            lr->szB + lr->indirect_szB, d_bytes,
            lr->szB, d_direct_bytes,
            lr->indirect_szB, d_indirect_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            n_this_record, n_total_records
         );
      } else {
         emit(
            "%'lu%s bytes in %'u%s blocks are %s in loss record %'u of %'u\n",
            lr->szB, d_direct_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            n_this_record, n_total_records
         );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } /* if (xml) */
}
/* Public wrapper around pp_LossRecord, always in plain-text form
   (used e.g. by the gdbserver monitor commands). */
void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l)
{
   pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
}
/* Print one error 'err' on the XML or normal channel, dispatching on
   the error kind.  For value/definedness errors this also records (in
   MC_(any_value_errors)) that at least one such error was seen, and,
   when origin tracking is active, prints where the uninitialised value
   was created via mc_pp_origin. */
void MC_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand */
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem:
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( " <kind>CoreMemError</kind>\n" );
            emit( " <what>%pS contains unaddressable byte(s)</what>\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "%s contains unaddressable byte(s)\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_Value:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( " <kind>UninitValue</kind>\n" );
            emit( " <what>Use of uninitialised value of size %lu</what>\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            // origin_ec is only non-NULL when origin tracking is on.
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                             extra->Err.Value.otag & 3 );
         } else {
            /* Could also show extra->Err.Value.otag if debugging origin
               tracking */
            emit( "Use of uninitialised value of size %lu\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                             extra->Err.Value.otag & 3 );
         }
         break;

      case Err_Cond:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( " <kind>UninitCondition</kind>\n" );
            emit( " <what>Conditional jump or move depends"
                  " on uninitialised value(s)</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Conditional jump or move depends"
                  " on uninitialised value(s)\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         }
         break;

      case Err_RegParam:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( " <kind>SyscallParam</kind>\n" );
            emit( " <what>Syscall param %pS contains "
                  "uninitialised byte(s)</what>\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         } else {
            emit( "Syscall param %s contains uninitialised byte(s)\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         }
         break;

      case Err_MemParam:
         // Only a definedness (not addressability) problem counts as a
         // value error.
         if (!extra->Err.MemParam.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( " <kind>SyscallParam</kind>\n" );
            emit( " <what>Syscall param %pS points to %s byte(s)</what>\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         } else {
            emit( "Syscall param %s points to %s byte(s)\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         }
         break;

      case Err_User:
         if (!extra->Err.User.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( " <kind>ClientCheck</kind>\n" );
            emit( " <what>%s byte(s) found "
                  "during client check request</what>\n",
                  extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
                                False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         } else {
            emit( "%s byte(s) found during client check request\n",
                  extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
                                False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         }
         break;

      case Err_Free:
         if (xml) {
            emit( " <kind>InvalidFree</kind>\n" );
            emit( " <what>Invalid free() / delete / delete[]"
                  " / realloc()</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Free.ai, False );
         } else {
            emit( "Invalid free() / delete / delete[] / realloc()\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Free.ai, False );
         }
         break;

      case Err_FreeMismatch:
         if (xml) {
            emit( " <kind>MismatchedFree</kind>\n" );
            emit( " <what>Mismatched free() / delete / delete []</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.FreeMismatch.ai, False);
         } else {
            emit( "Mismatched free() / delete / delete []\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.FreeMismatch.ai, False);
         }
         break;

      case Err_Addr:
         if (xml) {
            emit( " <kind>Invalid%s</kind>\n",
                  extra->Err.Addr.isWrite ? "Write" : "Read"  );
            emit( " <what>Invalid %s of size %lu</what>\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Addr.ai,
                                 extra->Err.Addr.maybe_gcc );
         } else {
            emit( "Invalid %s of size %lu\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Addr.ai,
                                 extra->Err.Addr.maybe_gcc );
         }
         break;

      case Err_Jump:
         if (xml) {
            emit( " <kind>InvalidJump</kind>\n" );
            emit( " <what>Jump to the invalid address stated "
                  "on the next line</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                                 False );
         } else {
            emit( "Jump to the invalid address stated on the next line\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                                 False );
         }
         break;

      case Err_Overlap:
         if (xml) {
            emit( " <kind>Overlap</kind>\n" );
            if (extra->Err.Overlap.szB == 0) {
               // NOTE(review): the "\n" before </what> is inconsistent with
               // the szB != 0 case below -- looks unintentional, but kept
               // to avoid changing tool output.
               emit( " <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx)\n</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( " <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx, %lu)</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            if (extra->Err.Overlap.szB == 0) {
               emit( "Source and destination overlap in %s(%#lx, %#lx)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "Source and destination overlap in %s(%#lx, %#lx, %lu)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_IllegalMempool:
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( " <kind>InvalidMemPool</kind>\n" );
            emit( " <what>Illegal memory pool address</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.IllegalMempool.ai, False );
         } else {
            emit( "Illegal memory pool address\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.IllegalMempool.ai, False );
         }
         break;

      case Err_Leak: {
         UInt        n_this_record   = extra->Err.Leak.n_this_record;
         UInt        n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* lr              = extra->Err.Leak.lr;
         pp_LossRecord (n_this_record, n_total_records, lr, xml);
         break;
      }

      case Err_FishyValue:
         if (xml) {
            emit( " <kind>FishyValue</kind>\n" );
            emit( " <what>");
            emit( "Argument '%s' of function %s has a fishy "
                  "(possibly negative) value: %ld\n",
                  extra->Err.FishyValue.argument_name,
                  extra->Err.FishyValue.function_name,
                  (SSizeT)extra->Err.FishyValue.value);
            emit( "</what>");
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "Argument '%s' of function %s has a fishy "
                  "(possibly negative) value: %ld\n",
                  extra->Err.FishyValue.argument_name,
                  extra->Err.FishyValue.function_name,
                  (SSizeT)extra->Err.FishyValue.value);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      default:
         VG_(printf)("Error:\n unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error)");
   }
}
714 /*------------------------------------------------------------*/
715 /*--- Recording errors ---*/
716 /*------------------------------------------------------------*/
718 /* These many bytes below %ESP are considered addressible if we're
719 doing the --workaround-gcc296-bugs hack. */
720 #define VG_GCC296_BUG_STACK_SLOP 1024
722 /* Is this address within some small distance below %ESP? Used only
723 for the --workaround-gcc296-bugs kludge. */
724 static Bool is_just_below_ESP( Addr esp, Addr aa )
726 esp -= VG_STACK_REDZONE_SZB;
727 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
728 return True;
729 else
730 return False;
733 /* --- Called from generated and non-generated code --- */
/* Record an invalid read/write of szB bytes at address 'a' by thread
   'tid'.  Several filters run first (order matters): ignored address
   ranges, gdbserver watchpoints, the gcc-2.96 below-%esp kludge, and
   --ignore-range-below-sp; any of these suppresses the error. */
void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

   // Let the gdbserver report a watchpoint hit instead of an error.
   if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
      return;

   Addr current_sp = VG_(get_SP)(tid);
   just_below_esp = is_just_below_ESP( current_sp, a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   /* Also, if this is caused by an access in the range of offsets
      below the stack pointer as described by
      --ignore-range-below-sp, ignore it. */
   if (MC_(in_ignored_range_below_sp)( current_sp, a, szB ))
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}
/* Record a use of an uninitialised value of szB bytes.  'otag' is the
   origin tag; non-zero only when origin tracking (--track-origins,
   i.e. mc_level 3) is active. */
void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Value.szB       = szB;
   extra.Err.Value.otag      = otag;
   extra.Err.Value.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}
/* Record a conditional jump/move on uninitialised value(s).  'otag' is
   the origin tag; non-zero only with origin tracking (mc_level 3). */
void MC_(record_cond_error) ( ThreadId tid, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Cond.otag      = otag;
   extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}
791 /* --- Called from non-generated code --- */
/* This is for memory errors in signal-related memory.  'msg' names the
   offending object and is stored as the error string. */
void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}
/* Record a syscall register parameter containing uninitialised bytes.
   'msg' names the parameter; 'otag' is the origin tag (non-zero only
   with origin tracking). */
void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.RegParam.otag      = otag;
   extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}
/* Record a syscall memory parameter pointing at unaddressable
   (isAddrErr) or uninitialised (!isAddrErr) bytes at address 'a'.
   Origin tags only apply to definedness errors, hence the assertion
   that otag != 0 implies !isAddrErr. */
void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, const HChar* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (!isAddrErr)
      tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag != 0) {
      tl_assert( MC_(clo_mc_level) == 3 );
      tl_assert( !isAddrErr );
   }
   extra.Err.MemParam.isAddrErr = isAddrErr;
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;
   extra.Err.MemParam.otag      = otag;
   extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}
/* Record a jump to the unaddressable address 'a'.  The address is
   described later, when the error is printed. */
void MC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Jump.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
}
/* Record an invalid free/delete/realloc of address 'a' (not a heap
   block, or already freed). */
void MC_(record_free_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Free.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
}
/* Record a deallocation of chunk 'mc' with a function from the wrong
   family (e.g. malloc'd block freed with delete).  Unlike the other
   record_* functions, the AddrInfo is filled in here directly from the
   chunk rather than left Addr_Undescribed. */
void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.FreeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
   VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
   ai->Addr.Block.freed_at = MC_(freed_at) (mc);
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &extra );
}
/* Record a client-request mempool operation on an illegal pool
   address 'a'. */
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
}
/* Record overlapping src/dst blocks passed to 'function' (strcpy,
   memcpy, ...).  szB is 0 when the routine has no explicit length
   argument (e.g. strcpy). */
void MC_(record_overlap_error) ( ThreadId tid, const HChar* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}
/* Record (and optionally print) one leak loss record.  Leaks go
   through VG_(unique_error) rather than VG_(maybe_record_error), keyed
   on the allocation context; returns True if the error was printed. */
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lr,
                              Bool print_record, Bool count_error )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lr              = lr;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lr->key.allocated_at, print_record,
                       /*allow_GDB_attach*/False, count_error );
}
/* Record a "fishy" SizeT argument: one that is negative when viewed as
   a signed value.  Returns True if an error was recorded, False if the
   value is non-negative (i.e. not fishy). */
Bool MC_(record_fishy_value_error) ( ThreadId tid, const HChar *function_name,
                                     const HChar *argument_name, SizeT value)
{
   MC_Error extra;

   tl_assert(VG_INVALID_THREADID != tid);

   if ((SSizeT)value >= 0) return False;  // not a fishy value

   extra.Err.FishyValue.function_name = function_name;
   extra.Err.FishyValue.argument_name = argument_name;
   extra.Err.FishyValue.value = value;

   VG_(maybe_record_error)(
      tid, Err_FishyValue, /*addr*/0, /*s*/NULL, &extra );

   return True;
}
/* Record a failure of a client check request (CHECK_MEM_IS_ADDRESSABLE
   / CHECK_MEM_IS_DEFINED) at address 'a'.  As with MemParam errors, an
   origin tag is only valid for definedness errors. */
void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}
/* Does chunk 'mc_search' belong to any registered memory pool?
   Linear scan over every pool's chunk table, so potentially O(total
   pool chunks); only custom-allocated chunks can match. */
Bool MC_(is_mempool_block)(MC_Chunk* mc_search)
{
   MC_Mempool* mp;

   if (!MC_(mempool_list))
      return False;

   // A chunk can only come from a mempool if a custom allocator
   // is used. No search required for other kinds.
   if (mc_search->allockind == MC_AllocCustom) {
      VG_(HT_ResetIter)( MC_(mempool_list) );
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (mc == mc_search)
               return True;
         }
      }
   }

   return False;
}
957 /*------------------------------------------------------------*/
958 /*--- Other error operations ---*/
959 /*------------------------------------------------------------*/
961 /* Compare error contexts, to detect duplicates. Note that if they
962 are otherwise the same, the faulting addrs and associated rwoffsets
963 are allowed to be different. */
/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  */
Bool MC_(eq_Error) ( VgRes res, const Error* e1, const Error* e2 )
{
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         const HChar *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;   // same pointer => trivially equal
         if (VG_STREQ(e1s, e2s)) return True;
         return False;
      }

      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes. Currently, the 2nd of those to be
      // detected won't be reported. That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         // fall through
         // NB: reading Err.User.isAddrErr for a MemParam error is OK:
         // isAddrErr is the first member of both union variants, so the
         // two fields occupy the same storage.
      case Err_User:
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                    ? True : False );

      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
         return True;

      case Err_FishyValue:
         return VG_STREQ(extra1->Err.FishyValue.function_name,
                         extra2->Err.FishyValue.function_name) &&
                VG_STREQ(extra1->Err.FishyValue.argument_name,
                         extra2->Err.FishyValue.argument_name);

      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                    ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                    ? True : False );

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
1032 /* Functions used when searching MC_Chunk lists */
1033 static
1034 Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
1036 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1037 MC_(Malloc_Redzone_SzB) );
1039 static
1040 Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
1042 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1043 rzB );
1046 // Forward declarations
1047 static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
1048 static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
1049 AddrInfo* ai );
1052 /* Describe an address as best you can, for error messages,
1053 putting the result in ai. */
1054 static void describe_addr ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
1056 MC_Chunk* mc;
1058 tl_assert(Addr_Undescribed == ai->tag);
1060 /* -- Perhaps it's a user-named block? -- */
1061 if (client_block_maybe_describe( a, ai )) {
1062 return;
1065 /* -- Perhaps it's in mempool block (non-meta)? -- */
1066 if (mempool_block_maybe_describe( a, /*is_metapool*/ False, ai)) {
1067 return;
1070 /* Blocks allocated by memcheck malloc functions are either
1071 on the recently freed list or on the malloc-ed list.
1072 Custom blocks can be on both : a recently freed block might
1073 have been just re-allocated.
1074 So, first search the malloc-ed block, as the most recent
1075 block is the probable cause of error.
1076 We however detect and report that this is a recently re-allocated
1077 block. */
1078 /* -- Search for a currently malloc'd block which might bracket it. -- */
1079 VG_(HT_ResetIter)(MC_(malloc_list));
1080 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1081 if (!MC_(is_mempool_block)(mc) &&
1082 addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
1083 ai->tag = Addr_Block;
1084 ai->Addr.Block.block_kind = Block_Mallocd;
1085 if (MC_(get_freed_block_bracketting)( a ))
1086 ai->Addr.Block.block_desc = "recently re-allocated block";
1087 else
1088 ai->Addr.Block.block_desc = "block";
1089 ai->Addr.Block.block_szB = mc->szB;
1090 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1091 ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
1092 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
1093 ai->Addr.Block.freed_at = MC_(freed_at)(mc);
1094 return;
1097 /* -- Search for a recently freed block which might bracket it. -- */
1098 mc = MC_(get_freed_block_bracketting)( a );
1099 if (mc) {
1100 ai->tag = Addr_Block;
1101 ai->Addr.Block.block_kind = Block_Freed;
1102 ai->Addr.Block.block_desc = "block";
1103 ai->Addr.Block.block_szB = mc->szB;
1104 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1105 ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
1106 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
1107 ai->Addr.Block.freed_at = MC_(freed_at)(mc);
1108 return;
1111 /* -- Perhaps it's in a meta mempool block? -- */
1112 /* This test is done last, because metapool blocks overlap with blocks
1113 handed out to the application. That makes every heap address part of
1114 a metapool block, so the interesting cases are handled first.
1115 This final search is a last-ditch attempt. When found, it is probably
1116 an error in the custom allocator itself. */
1117 if (mempool_block_maybe_describe( a, /*is_metapool*/ True, ai )) {
1118 return;
1121 /* No block found. Search a non-heap block description. */
1122 VG_(describe_addr) (ep, a, ai);
1125 void MC_(pp_describe_addr) ( DiEpoch ep, Addr a )
1127 AddrInfo ai;
1129 ai.tag = Addr_Undescribed;
1130 describe_addr (ep, a, &ai);
1131 VG_(pp_addrinfo_mc) (a, &ai, /* maybe_gcc */ False);
1132 VG_(clear_addrinfo) (&ai);
1135 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
1136 does not refer to a known origin. */
1137 static void update_origin ( /*OUT*/ExeContext** origin_ec,
1138 UInt otag )
1140 UInt ecu = otag & ~3;
1141 *origin_ec = NULL;
1142 if (VG_(is_plausible_ECU)(ecu)) {
1143 *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
1147 /* Updates the copy with address info if necessary (but not for all errors). */
1148 UInt MC_(update_Error_extra)( const Error* err )
1150 MC_Error* extra = VG_(get_error_extra)(err);
1151 DiEpoch ep = VG_(get_ExeContext_epoch)(VG_(get_error_where)(err));
1153 switch (VG_(get_error_kind)(err)) {
1154 // These ones don't have addresses associated with them, and so don't
1155 // need any updating.
1156 case Err_CoreMem:
1157 //case Err_Value:
1158 //case Err_Cond:
1159 case Err_Overlap:
1160 case Err_FishyValue:
1161 // For Err_Leaks the returned size does not matter -- they are always
1162 // shown with VG_(unique_error)() so they 'extra' not copied. But
1163 // we make it consistent with the others.
1164 case Err_Leak:
1165 return sizeof(MC_Error);
1167 // For value errors, get the ExeContext corresponding to the
1168 // origin tag. Note that it is a kludge to assume that
1169 // a length-1 trace indicates a stack origin. FIXME.
1170 case Err_Value:
1171 update_origin( &extra->Err.Value.origin_ec,
1172 extra->Err.Value.otag );
1173 return sizeof(MC_Error);
1174 case Err_Cond:
1175 update_origin( &extra->Err.Cond.origin_ec,
1176 extra->Err.Cond.otag );
1177 return sizeof(MC_Error);
1178 case Err_RegParam:
1179 update_origin( &extra->Err.RegParam.origin_ec,
1180 extra->Err.RegParam.otag );
1181 return sizeof(MC_Error);
1183 // These ones always involve a memory address.
1184 case Err_Addr:
1185 describe_addr ( ep, VG_(get_error_address)(err),
1186 &extra->Err.Addr.ai );
1187 return sizeof(MC_Error);
1188 case Err_MemParam:
1189 describe_addr ( ep, VG_(get_error_address)(err),
1190 &extra->Err.MemParam.ai );
1191 update_origin( &extra->Err.MemParam.origin_ec,
1192 extra->Err.MemParam.otag );
1193 return sizeof(MC_Error);
1194 case Err_Jump:
1195 describe_addr ( ep, VG_(get_error_address)(err),
1196 &extra->Err.Jump.ai );
1197 return sizeof(MC_Error);
1198 case Err_User:
1199 describe_addr ( ep, VG_(get_error_address)(err),
1200 &extra->Err.User.ai );
1201 update_origin( &extra->Err.User.origin_ec,
1202 extra->Err.User.otag );
1203 return sizeof(MC_Error);
1204 case Err_Free:
1205 describe_addr ( ep, VG_(get_error_address)(err),
1206 &extra->Err.Free.ai );
1207 return sizeof(MC_Error);
1208 case Err_IllegalMempool:
1209 describe_addr ( ep, VG_(get_error_address)(err),
1210 &extra->Err.IllegalMempool.ai );
1211 return sizeof(MC_Error);
1213 // Err_FreeMismatches have already had their address described; this is
1214 // possible because we have the MC_Chunk on hand when the error is
1215 // detected. However, the address may be part of a user block, and if so
1216 // we override the pre-determined description with a user block one.
1217 case Err_FreeMismatch: {
1218 tl_assert(extra && Block_Mallocd ==
1219 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
1220 (void)client_block_maybe_describe( VG_(get_error_address)(err),
1221 &extra->Err.FreeMismatch.ai );
1222 return sizeof(MC_Error);
1225 default: VG_(tool_panic)("mc_update_extra: bad errkind");
1230 static Bool client_block_maybe_describe( Addr a,
1231 /*OUT*/AddrInfo* ai )
1233 UWord i;
1234 CGenBlock* cgbs = NULL;
1235 UWord cgb_used = 0;
1237 MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
1238 if (cgbs == NULL)
1239 tl_assert(cgb_used == 0);
1241 /* Perhaps it's a general block ? */
1242 for (i = 0; i < cgb_used; i++) {
1243 if (cgbs[i].start == 0 && cgbs[i].size == 0)
1244 continue;
1245 // Use zero as the redzone for client blocks.
1246 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
1247 ai->tag = Addr_Block;
1248 ai->Addr.Block.block_kind = Block_UserG;
1249 ai->Addr.Block.block_desc = cgbs[i].desc;
1250 ai->Addr.Block.block_szB = cgbs[i].size;
1251 ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
1252 ai->Addr.Block.allocated_at = cgbs[i].where;
1253 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
1254 ai->Addr.Block.freed_at = VG_(null_ExeContext)();;
1255 return True;
1258 return False;
1262 static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
1263 /*OUT*/AddrInfo* ai )
1265 MC_Mempool* mp;
1266 tl_assert( MC_(mempool_list) );
1268 VG_(HT_ResetIter)( MC_(mempool_list) );
1269 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
1270 if (mp->chunks != NULL && mp->metapool == is_metapool) {
1271 MC_Chunk* mc;
1272 VG_(HT_ResetIter)(mp->chunks);
1273 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
1274 if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
1275 ai->tag = Addr_Block;
1276 ai->Addr.Block.block_kind = Block_MempoolChunk;
1277 ai->Addr.Block.block_desc = "block";
1278 ai->Addr.Block.block_szB = mc->szB;
1279 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1280 ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
1281 VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
1282 ai->Addr.Block.freed_at = MC_(freed_at)(mc);
1283 return True;
1288 return False;
1292 /*------------------------------------------------------------*/
1293 /*--- Suppressions ---*/
1294 /*------------------------------------------------------------*/
/* The kinds of suppression memcheck understands.  Each corresponds to one
   of the suppression-file names recognised by
   MC_(is_recognised_suppression).  NOTE: the Value* and Addr* members of
   each family are declared consecutively; other code may rely on that. */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp, Value32Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp, Addr32Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
      FishyValueSupp,// Fishy value suppression.
   }
   MC_SuppKind;
1320 Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su )
1322 SuppKind skind;
1324 if (VG_STREQ(name, "Param")) skind = ParamSupp;
1325 else if (VG_STREQ(name, "User")) skind = UserSupp;
1326 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
1327 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
1328 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
1329 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
1330 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
1331 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
1332 else if (VG_STREQ(name, "Addr32")) skind = Addr32Supp;
1333 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
1334 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
1335 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
1336 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
1337 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
1338 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
1339 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
1340 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1341 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1342 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1343 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1344 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1345 else if (VG_STREQ(name, "Value32")) skind = Value32Supp;
1346 else if (VG_STREQ(name, "FishyValue")) skind = FishyValueSupp;
1347 else
1348 return False;
1350 VG_(set_supp_kind)(su, skind);
1351 return True;
/* Per-suppression extra state for Leak suppressions, attached via
   VG_(set_supp_extra) in MC_(read_extra_suppression_info). */
typedef struct _MC_LeakSuppExtra MC_LeakSuppExtra;

struct _MC_LeakSuppExtra {
   UInt match_leak_kinds;   // set of Reachedness kinds this supp matches

   /* Maintains nr of blocks and bytes suppressed with this suppression
      during the leak search identified by leak_search_gen.
      blocks_suppressed and bytes_suppressed are reset to 0 when
      used the first time during a leak search. */
   SizeT blocks_suppressed;
   SizeT bytes_suppressed;
   UInt  leak_search_gen;   // generation of the last leak search that used it
};
/* Per-suppression extra state for FishyValue suppressions: the function
   and argument names parsed from the "function_name(argument_name)" line
   (see MC_(read_extra_suppression_info)). */
typedef struct {
   const HChar *function_name;
   const HChar *argument_name;
} MC_FishyValueExtra;
/* Read any tool-specific extra lines of a suppression entry from fd.
   Returns True if the suppression is well-formed, False on a parse error
   or (for ParamSupp) premature EOF.  Kinds with no extra info fall
   straight through to the final 'return True'. */
Bool MC_(read_extra_suppression_info) ( Int fd, HChar** bufpp,
                                        SizeT* nBufp, Int* lineno, Supp *su )
{
   Bool eof;
   Int i;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      /* Param suppressions have one mandatory extra line: the syscall
         param string to match against. */
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
   } else if (VG_(get_supp_kind)(su) == LeakSupp) {
      // We might have the optional match-leak-kinds line
      MC_LeakSuppExtra* lse;
      lse = VG_(malloc)("mc.resi.2", sizeof(MC_LeakSuppExtra));
      lse->match_leak_kinds = MC_(all_Reachedness)();
      lse->blocks_suppressed = 0;
      lse->bytes_suppressed = 0;
      lse->leak_search_gen = 0;
      VG_(set_supp_extra)(su, lse); // By default, all kinds will match.
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True; // old LeakSupp style, no match-leak-kinds line.
      if (0 == VG_(strncmp)(*bufpp, "match-leak-kinds:", 17)) {
         /* Skip whitespace after the keyword, then parse the kind set. */
         i = 17;
         while ((*bufpp)[i] && VG_(isspace)((*bufpp)[i]))
            i++;
         if (!VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                  True/*allow_all*/,
                                  (*bufpp)+i, &lse->match_leak_kinds)) {
            return False;
         }
      } else {
         return False; // unknown extra line.
      }
   } else if (VG_(get_supp_kind)(su) == FishyValueSupp) {
      MC_FishyValueExtra *extra;
      HChar *p, *function_name, *argument_name = NULL;

      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True;

      // The suppression string is: function_name(argument_name)
      /* Split the duplicated line in place: '(' and ')' become NULs,
         so function_name and argument_name share one allocation. */
      function_name = VG_(strdup)("mv.resi.4", *bufpp);
      p = VG_(strchr)(function_name, '(');
      if (p != NULL) {
         *p++ = '\0';
         argument_name = p;
         p = VG_(strchr)(p, ')');
         if (p != NULL)
            *p = '\0';
      }
      /* p is NULL here iff '(' or ')' was missing. */
      if (p == NULL) { // malformed suppression string
         VG_(free)(function_name);
         return False;
      }

      extra = VG_(malloc)("mc.resi.3", sizeof *extra);
      extra->function_name = function_name;
      extra->argument_name = argument_name;

      VG_(set_supp_extra)(su, extra);
   }
   return True;
}
1437 Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su )
1439 Int su_szB;
1440 MC_Error* extra = VG_(get_error_extra)(err);
1441 ErrorKind ekind = VG_(get_error_kind)(err);
1443 switch (VG_(get_supp_kind)(su)) {
1444 case ParamSupp:
1445 return ((ekind == Err_RegParam || ekind == Err_MemParam)
1446 && VG_STREQ(VG_(get_error_string)(err),
1447 VG_(get_supp_string)(su)));
1449 case UserSupp:
1450 return (ekind == Err_User);
1452 case CoreMemSupp:
1453 return (ekind == Err_CoreMem
1454 && VG_STREQ(VG_(get_error_string)(err),
1455 VG_(get_supp_string)(su)));
1457 case Value1Supp: su_szB = 1; goto value_case;
1458 case Value2Supp: su_szB = 2; goto value_case;
1459 case Value4Supp: su_szB = 4; goto value_case;
1460 case Value8Supp: su_szB = 8; goto value_case;
1461 case Value16Supp:su_szB =16; goto value_case;
1462 case Value32Supp:su_szB =32; goto value_case;
1463 value_case:
1464 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
1466 case CondSupp:
1467 return (ekind == Err_Cond);
1469 case Addr1Supp: su_szB = 1; goto addr_case;
1470 case Addr2Supp: su_szB = 2; goto addr_case;
1471 case Addr4Supp: su_szB = 4; goto addr_case;
1472 case Addr8Supp: su_szB = 8; goto addr_case;
1473 case Addr16Supp:su_szB =16; goto addr_case;
1474 case Addr32Supp:su_szB =32; goto addr_case;
1475 addr_case:
1476 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
1478 case JumpSupp:
1479 return (ekind == Err_Jump);
1481 case FreeSupp:
1482 return (ekind == Err_Free || ekind == Err_FreeMismatch);
1484 case OverlapSupp:
1485 return (ekind == Err_Overlap);
1487 case LeakSupp:
1488 if (ekind == Err_Leak) {
1489 MC_LeakSuppExtra* lse = (MC_LeakSuppExtra*) VG_(get_supp_extra)(su);
1490 if (lse->leak_search_gen != MC_(leak_search_gen)) {
1491 // First time we see this suppression during this leak search.
1492 // => reset the counters to 0.
1493 lse->blocks_suppressed = 0;
1494 lse->bytes_suppressed = 0;
1495 lse->leak_search_gen = MC_(leak_search_gen);
1497 return RiS(extra->Err.Leak.lr->key.state, lse->match_leak_kinds);
1498 } else
1499 return False;
1501 case MempoolSupp:
1502 return (ekind == Err_IllegalMempool);
1504 case FishyValueSupp: {
1505 MC_FishyValueExtra *supp_extra = VG_(get_supp_extra)(su);
1507 return (ekind == Err_FishyValue) &&
1508 VG_STREQ(extra->Err.FishyValue.function_name,
1509 supp_extra->function_name) &&
1510 VG_STREQ(extra->Err.FishyValue.argument_name,
1511 supp_extra->argument_name);
1514 default:
1515 VG_(printf)("Error:\n"
1516 " unknown suppression type %d\n",
1517 VG_(get_supp_kind)(su));
1518 VG_(tool_panic)("unknown suppression type in "
1519 "MC_(error_matches_suppression)");
1523 const HChar* MC_(get_error_name) ( const Error* err )
1525 switch (VG_(get_error_kind)(err)) {
1526 case Err_RegParam: return "Param";
1527 case Err_MemParam: return "Param";
1528 case Err_User: return "User";
1529 case Err_FreeMismatch: return "Free";
1530 case Err_IllegalMempool: return "Mempool";
1531 case Err_Free: return "Free";
1532 case Err_Jump: return "Jump";
1533 case Err_CoreMem: return "CoreMem";
1534 case Err_Overlap: return "Overlap";
1535 case Err_Leak: return "Leak";
1536 case Err_Cond: return "Cond";
1537 case Err_FishyValue: return "FishyValue";
1538 case Err_Addr: {
1539 MC_Error* extra = VG_(get_error_extra)(err);
1540 switch ( extra->Err.Addr.szB ) {
1541 case 1: return "Addr1";
1542 case 2: return "Addr2";
1543 case 4: return "Addr4";
1544 case 8: return "Addr8";
1545 case 16: return "Addr16";
1546 case 32: return "Addr32";
1547 default: VG_(tool_panic)("unexpected size for Addr");
1550 case Err_Value: {
1551 MC_Error* extra = VG_(get_error_extra)(err);
1552 switch ( extra->Err.Value.szB ) {
1553 case 1: return "Value1";
1554 case 2: return "Value2";
1555 case 4: return "Value4";
1556 case 8: return "Value8";
1557 case 16: return "Value16";
1558 case 32: return "Value32";
1559 default: VG_(tool_panic)("unexpected size for Value");
1562 default: VG_(tool_panic)("get_error_name: unexpected type");
1566 SizeT MC_(get_extra_suppression_info) ( const Error* err,
1567 /*OUT*/HChar* buf, Int nBuf )
1569 ErrorKind ekind = VG_(get_error_kind)(err);
1570 tl_assert(buf);
1571 tl_assert(nBuf >= 1);
1573 if (Err_RegParam == ekind || Err_MemParam == ekind) {
1574 const HChar* errstr = VG_(get_error_string)(err);
1575 tl_assert(errstr);
1576 return VG_(snprintf)(buf, nBuf, "%s", errstr);
1577 } else if (Err_Leak == ekind) {
1578 MC_Error* extra = VG_(get_error_extra)(err);
1579 return VG_(snprintf) (buf, nBuf, "match-leak-kinds: %s",
1580 pp_Reachedness_for_leak_kinds(extra->Err.Leak.lr->key.state));
1581 } else if (Err_FishyValue == ekind) {
1582 MC_Error* extra = VG_(get_error_extra)(err);
1583 return VG_(snprintf) (buf, nBuf, "%s(%s)",
1584 extra->Err.FishyValue.function_name,
1585 extra->Err.FishyValue.argument_name);
1586 } else {
1587 buf[0] = '\0';
1588 return 0;
1592 SizeT MC_(print_extra_suppression_use) ( const Supp *su,
1593 /*OUT*/HChar *buf, Int nBuf )
1595 tl_assert(nBuf >= 1);
1597 if (VG_(get_supp_kind)(su) == LeakSupp) {
1598 MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
1600 if (lse->leak_search_gen == MC_(leak_search_gen)
1601 && lse->blocks_suppressed > 0) {
1602 return VG_(snprintf) (buf, nBuf,
1603 "suppressed: %'lu bytes in %'lu blocks",
1604 lse->bytes_suppressed,
1605 lse->blocks_suppressed);
1609 buf[0] = '\0';
1610 return 0;
1613 void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su)
1615 if (VG_(get_supp_kind)(su) == LeakSupp) {
1616 MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
1617 MC_Error* extra = VG_(get_error_extra)(err);
1619 tl_assert (lse->leak_search_gen == MC_(leak_search_gen));
1620 lse->blocks_suppressed += extra->Err.Leak.lr->num_blocks;
1621 lse->bytes_suppressed
1622 += extra->Err.Leak.lr->szB + extra->Err.Leak.lr->indirect_szB;
1626 /*--------------------------------------------------------------------*/
1627 /*--- end mc_errors.c ---*/
1628 /*--------------------------------------------------------------------*/