/*--------------------------------------------------------------------*/
/*--- Management, printing, etc, of errors and suppressions.       ---*/
/*---                                                  mc_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2017 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_poolalloc.h"     // For mc_include.h
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)
#include "pub_tool_xarray.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "mc_include.h"
/*------------------------------------------------------------*/
/*--- Error types                                          ---*/
/*------------------------------------------------------------*/

/* See comment in mc_include.h */
Bool MC_(any_value_errors) = False;


/* ------------------ Errors ----------------------- */
/* What kind of error it is. */
typedef
   enum {
      Err_Value,
      Err_Cond,
      Err_CoreMem,
      Err_Addr,
      Err_Jump,
      Err_RegParam,
      Err_MemParam,
      Err_User,
      Err_Free,
      Err_FreeMismatch,
      Err_Overlap,
      Err_Leak,
      Err_IllegalMempool,
      Err_FishyValue,
      Err_ReallocSizeZero,
      Err_BadAlign,
      Err_BadSize,
      Err_SizeMismatch,
      Err_AlignMismatch,
   }
   MC_ErrorTag;
typedef struct _MC_Error MC_Error;

struct _MC_Error {
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
   //MC_ErrorTag tag;

   union {
      // Use of an undefined value:
      // - as a pointer in a load or store
      // - as a jump target
      struct {
         SizeT szB;   // size of value in bytes
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Value;

      // Use of an undefined value in a conditional branch or move.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } Cond;

      // Addressability error in core (signal-handling) operation.
      // It would be good to get rid of this error kind, merge it with
      // another one somehow.
      struct {
      } CoreMem;

      // Use of an unaddressable memory location in a load or store.
      struct {
         Bool     isWrite;    // read or write?
         SizeT    szB;        // not used for exec (jump) errors
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
         AddrInfo ai;
      } Addr;

      // Jump to an unaddressable memory location.
      struct {
         AddrInfo ai;
      } Jump;

      // System call register input contains undefined bytes.
      struct {
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } RegParam;

      // System call memory input contains undefined/unaddressable bytes
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } MemParam;

      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
      struct {
         Bool     isAddrErr;  // Addressability or definedness error?
         AddrInfo ai;
         // Origin info
         UInt        otag;      // origin tag
         ExeContext* origin_ec; // filled in later
      } User;

      // Program tried to free() something that's not a heap block (this
      // covers double-frees).
      struct {
         AddrInfo ai;
      } Free;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with not the matching one.
      struct {
         AddrInfo ai;
      } FreeMismatch;

      struct {
         AddrInfo ai;
      } ReallocSizeZero;

      struct {
         AddrInfo ai;
         SizeT dealloc_align;
         SizeT size;
         const HChar *msg;
      } BadAlign;

      struct {
         AddrInfo ai;
         SizeT size;
         const HChar *func;
      } BadSize;

      // Call to strcpy, memcpy, etc, with overlapping blocks.
      struct {
         Addr  src;   // Source block
         Addr  dst;   // Destination block
         SizeT szB;   // Size in bytes;  0 if unused.
      } Overlap;

      // A memory leak.
      struct {
         UInt        n_this_record;
         UInt        n_total_records;
         LossRecord* lr;
      } Leak;

      // A memory pool error.
      struct {
         AddrInfo ai;
      } IllegalMempool;

      // A fishy function argument value
      // An argument value is considered fishy if the corresponding
      // parameter has SizeT type and the value when interpreted as a
      // signed number is negative.
      struct {
         const HChar *function_name;
         const HChar *argument_name;
         SizeT value;
      } FishyValue;

      // Program allocates heap block with new but
      // deallocates with a matching delete
      // but with a different size
      struct {
         AddrInfo ai;
         const HChar *function_names;
         SizeT size;
      } SizeMismatch;

      // Program allocates heap block with one function
      // (malloc/new/new[]/custom) and deallocates with
      // a matching one but different alignment
      struct {
         AddrInfo ai;
         const HChar *function_names;
         SizeT alloc_align;
         SizeT dealloc_align;
         Bool default_delete;
      } AlignMismatch;
   } Err;
};
/*------------------------------------------------------------*/
/*--- Printing errors                                      ---*/
/*------------------------------------------------------------*/

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  Which, in
   Memcheck, we don't use.  Hence a no-op.
*/
void MC_(before_pp_Error) ( const Error* err ) {
}
/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}

static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
static const HChar* str_leak_lossmode ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "definitely lost"; break;
      case IndirectLeak: loss = "indirectly lost"; break;
      case Possible:     loss = "possibly lost"; break;
      case Reachable:    loss = "still reachable"; break;
   }
   return loss;
}

static const HChar* xml_leak_kind ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "Leak_DefinitelyLost"; break;
      case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
      case Possible:     loss = "Leak_PossiblyLost"; break;
      case Reachable:    loss = "Leak_StillReachable"; break;
   }
   return loss;
}

const HChar* MC_(parse_leak_kinds_tokens) =
   "reachable,possible,indirect,definite";
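/* These tokens are what users write (comma-separated) in options such as
   --show-leak-kinds= and --errors-for-leak-kinds=, and in the optional
   "match-leak-kinds:" line of a Leak suppression; e.g.
   --show-leak-kinds=definite,indirect.  The keywords "all" and "none" are
   also accepted, since VG_(parse_enum_set) is called with allow_all. */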
UInt MC_(all_Reachedness)(void)
{
   static UInt all;

   if (all == 0) {
      // Compute a set with all values by doing a parsing of the "all" keyword.
      Bool parseok = VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                         True,/*allow_all*/
                                         "all",
                                         &all);
      tl_assert (parseok && all);
   }

   return all;
}

static const HChar* pp_Reachedness_for_leak_kinds(Reachedness r)
{
   switch(r) {
   case Reachable:    return "reachable";
   case Possible:     return "possible";
   case IndirectLeak: return "indirect";
   case Unreached:    return "definite";
   default: tl_assert(0);
   }
}
static void mc_pp_origin ( ExeContext* ec, UInt okind )
{
   const HChar* src = NULL;
   tl_assert(ec);

   switch (okind) {
      case MC_OKIND_STACK:   src = " by a stack allocation"; break;
      case MC_OKIND_HEAP:    src = " by a heap allocation"; break;
      case MC_OKIND_USER:    src = " by a client request"; break;
      case MC_OKIND_UNKNOWN: src = ""; break;
   }
   tl_assert(src); /* guards against invalid 'okind' */

   if (VG_(clo_xml)) {
      emit( "  <auxwhat>Uninitialised value was created%s</auxwhat>\n",
            src);
      VG_(pp_ExeContext)( ec );
   } else {
      emit( " Uninitialised value was created%s\n", src);
      VG_(pp_ExeContext)( ec );
   }
}
HChar * MC_(snprintf_delta) (HChar * buf, Int size,
                             SizeT current_val, SizeT old_val,
                             LeakCheckDeltaMode delta_mode)
{
   // Make sure the buffer size is large enough. With old_val == 0 and
   // current_val == ULLONG_MAX the delta including inserted commas is:
   // 18,446,744,073,709,551,615
   // whose length is 26. Therefore:
   tl_assert(size >= 26 + 4 + 1);

   if (delta_mode == LCD_Any)
      buf[0] = '\0';
   else if (current_val >= old_val)
      VG_(snprintf) (buf, size, " (+%'lu)", current_val - old_val);
   else
      VG_(snprintf) (buf, size, " (-%'lu)", old_val - current_val);

   return buf;
}
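/* For example, with old_val 1000 and current_val 1500 in any delta mode
   other than LCD_Any, buf becomes " (+500)"; the %'lu format inserts
   thousands separators for larger deltas, e.g. " (+1,500)". */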
static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
                          LossRecord* lr, Bool xml)
{
   // char arrays to produce the indication of increase/decrease in case
   // of delta_mode != LCD_Any
   HChar d_bytes[31];
   HChar d_direct_bytes[31];
   HChar d_indirect_bytes[31];
   HChar d_num_blocks[31];
   /* A loss record that had an old number of blocks 0 is a new loss record.
      We mark it as new only when doing any kind of delta leak search. */
   const HChar *new_loss_record_marker
      = MC_(detect_memory_leaks_last_delta_mode) != LCD_Any
        && lr->old_num_blocks == 0
        ? "new " : "";

   MC_(snprintf_delta) (d_bytes, sizeof(d_bytes),
                        lr->szB + lr->indirect_szB,
                        lr->old_szB + lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_direct_bytes, sizeof(d_direct_bytes),
                        lr->szB,
                        lr->old_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_indirect_bytes, sizeof(d_indirect_bytes),
                        lr->indirect_szB,
                        lr->old_indirect_szB,
                        MC_(detect_memory_leaks_last_delta_mode));
   MC_(snprintf_delta) (d_num_blocks, sizeof(d_num_blocks),
                        (SizeT) lr->num_blocks,
                        (SizeT) lr->old_num_blocks,
                        MC_(detect_memory_leaks_last_delta_mode));

   if (xml) {
      emit("  <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
      if (lr->indirect_szB > 0) {
         emit( "  <xwhat>\n" );
         emit( "    <text>%'lu%s (%'lu%s direct, %'lu%s indirect) bytes "
               "in %'u%s blocks"
               " are %s in %sloss record %'u of %'u</text>\n",
               lr->szB + lr->indirect_szB, d_bytes,
               lr->szB, d_direct_bytes,
               lr->indirect_szB, d_indirect_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               new_loss_record_marker,
               n_this_record, n_total_records );
         // Nb: don't put commas in these XML numbers
         emit( "    <leakedbytes>%lu</leakedbytes>\n",
               lr->szB + lr->indirect_szB );
         emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
         emit( "  </xwhat>\n" );
      } else {
         emit( "  <xwhat>\n" );
         emit( "    <text>%'lu%s bytes in %'u%s blocks"
               " are %s in %sloss record %'u of %'u</text>\n",
               lr->szB, d_direct_bytes,
               lr->num_blocks, d_num_blocks,
               str_leak_lossmode(lr->key.state),
               new_loss_record_marker,
               n_this_record, n_total_records );
         emit( "    <leakedbytes>%lu</leakedbytes>\n", lr->szB);
         emit( "    <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
         emit( "  </xwhat>\n" );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } else { /* ! if (xml) */
      if (lr->indirect_szB > 0) {
         emit(
            "%'lu%s (%'lu%s direct, %'lu%s indirect) bytes in %'u%s blocks"
            " are %s in %sloss record %'u of %'u\n",
            lr->szB + lr->indirect_szB, d_bytes,
            lr->szB, d_direct_bytes,
            lr->indirect_szB, d_indirect_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            new_loss_record_marker,
            n_this_record, n_total_records
         );
      } else {
         emit(
            "%'lu%s bytes in %'u%s blocks are %s in %sloss record %'u of %'u\n",
            lr->szB, d_direct_bytes,
            lr->num_blocks, d_num_blocks,
            str_leak_lossmode(lr->key.state),
            new_loss_record_marker,
            n_this_record, n_total_records
         );
      }
      VG_(pp_ExeContext)(lr->key.allocated_at);
   } /* if (xml) */
}

void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l)
{
   pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
}
void MC_(pp_Error) ( const Error* err )
{
   const Bool xml  = VG_(clo_xml); /* a shorthand */
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem:
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>CoreMemError</kind>\n" );
            emit( "  <what>%pS contains unaddressable byte(s)</what>\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "%s contains unaddressable byte(s)\n",
                  VG_(get_error_string)(err));
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_Value:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitValue</kind>\n" );
            emit( "  <what>Use of uninitialised value of size %lu</what>\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                             extra->Err.Value.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Use of uninitialised value of size %lu\n",
                  extra->Err.Value.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Value.origin_ec)
               mc_pp_origin( extra->Err.Value.origin_ec,
                             extra->Err.Value.otag & 3 );
         }
         break;

      case Err_Cond:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>UninitCondition</kind>\n" );
            emit( "  <what>Conditional jump or move depends"
                  " on uninitialised value(s)</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         } else {
            /* Could also show extra->Err.Cond.otag if debugging origin
               tracking */
            emit( "Conditional jump or move depends"
                  " on uninitialised value(s)\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.Cond.origin_ec)
               mc_pp_origin( extra->Err.Cond.origin_ec,
                             extra->Err.Cond.otag & 3 );
         }
         break;

      case Err_RegParam:
         MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emit( "  <what>Syscall param %pS contains "
                  "uninitialised byte(s)</what>\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         } else {
            emit( "Syscall param %s contains uninitialised byte(s)\n",
                  VG_(get_error_string)(err) );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (extra->Err.RegParam.origin_ec)
               mc_pp_origin( extra->Err.RegParam.origin_ec,
                             extra->Err.RegParam.otag & 3 );
         }
         break;

      case Err_MemParam:
         if (!extra->Err.MemParam.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>SyscallParam</kind>\n" );
            emit( "  <what>Syscall param %pS points to %s byte(s)</what>\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         } else {
            emit( "Syscall param %s points to %s byte(s)\n",
                  VG_(get_error_string)(err),
                  extra->Err.MemParam.isAddrErr
                     ? "unaddressable" : "uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.MemParam.ai, False);
            if (extra->Err.MemParam.origin_ec
                && !extra->Err.MemParam.isAddrErr)
               mc_pp_origin( extra->Err.MemParam.origin_ec,
                             extra->Err.MemParam.otag & 3 );
         }
         break;

      case Err_User:
         if (!extra->Err.User.isAddrErr)
            MC_(any_value_errors) = True;
         if (xml) {
            emit( "  <kind>ClientCheck</kind>\n" );
            emit( "  <what>%s byte(s) found "
                  "during client check request</what>\n",
                  extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
                                False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         } else {
            emit( "%s byte(s) found during client check request\n",
                  extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
                                False);
            if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
               mc_pp_origin( extra->Err.User.origin_ec,
                             extra->Err.User.otag & 3 );
         }
         break;

      case Err_Free:
         if (xml) {
            emit( "  <kind>InvalidFree</kind>\n" );
            emit( "  <what>Invalid free() / delete / delete[]"
                  " / realloc()</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Free.ai, False );
         } else {
            emit( "Invalid free() / delete / delete[] / realloc()\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Free.ai, False );
         }
         break;

      case Err_FreeMismatch:
         if (xml) {
            emit( "  <kind>MismatchedFree</kind>\n" );
            emit( "  <what>Mismatched free() / delete / delete []</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.FreeMismatch.ai, False);
         } else {
            emit( "Mismatched free() / delete / delete []\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.FreeMismatch.ai, False);
         }
         break;

      case Err_Addr:
         if (xml) {
            emit( "  <kind>Invalid%s</kind>\n",
                  extra->Err.Addr.isWrite ? "Write" : "Read"  );
            emit( "  <what>Invalid %s of size %lu</what>\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Addr.ai,
                                 extra->Err.Addr.maybe_gcc );
         } else {
            emit( "Invalid %s of size %lu\n",
                  extra->Err.Addr.isWrite ? "write" : "read",
                  extra->Err.Addr.szB );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.Addr.ai,
                                 extra->Err.Addr.maybe_gcc );
         }
         break;

      case Err_Jump:
         if (xml) {
            emit( "  <kind>InvalidJump</kind>\n" );
            emit( "  <what>Jump to the invalid address stated "
                  "on the next line</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                                 False );
         } else {
            emit( "Jump to the invalid address stated on the next line\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
                                 False );
         }
         break;

      case Err_Overlap:
         if (xml) {
            emit( "  <kind>Overlap</kind>\n" );
            if (extra->Err.Overlap.szB == 0) {
               emit( "  <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx)\n</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "  <what>Source and destination overlap "
                     "in %pS(%#lx, %#lx, %lu)</what>\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            if (extra->Err.Overlap.szB == 0) {
               emit( "Source and destination overlap in %s(%#lx, %#lx)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src );
            } else {
               emit( "Source and destination overlap in %s(%#lx, %#lx, %lu)\n",
                     VG_(get_error_string)(err),
                     extra->Err.Overlap.dst, extra->Err.Overlap.src,
                     extra->Err.Overlap.szB );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_IllegalMempool:
         // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
         // the following code is untested.  Bad.
         if (xml) {
            emit( "  <kind>InvalidMemPool</kind>\n" );
            emit( "  <what>Illegal memory pool address</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.IllegalMempool.ai, False );
         } else {
            emit( "Illegal memory pool address\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
                                 &extra->Err.IllegalMempool.ai, False );
         }
         break;

      case Err_Leak: {
         UInt        n_this_record   = extra->Err.Leak.n_this_record;
         UInt        n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* lr              = extra->Err.Leak.lr;
         pp_LossRecord (n_this_record, n_total_records, lr, xml);
         break;
      }

      case Err_FishyValue:
         if (xml) {
            emit( "  <kind>FishyValue</kind>\n" );
            emit( "  <what>");
            emit( "Argument '%s' of function %s has a fishy "
                  "(possibly negative) value: %ld\n",
                  extra->Err.FishyValue.argument_name,
                  extra->Err.FishyValue.function_name,
                  (SSizeT)extra->Err.FishyValue.value);
            emit( "</what>");
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "Argument '%s' of function %s has a fishy "
                  "(possibly negative) value: %ld\n",
                  extra->Err.FishyValue.argument_name,
                  extra->Err.FishyValue.function_name,
                  (SSizeT)extra->Err.FishyValue.value);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_ReallocSizeZero:
         if (xml) {
            emit( "  <kind>ReallocSizeZero</kind>\n" );
            emit( "  <what>realloc() with size 0</what>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.ReallocSizeZero.ai, False);
         } else {
            emit( "realloc() with size 0\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.ReallocSizeZero.ai, False);
         }
         break;

      case Err_BadAlign:
         if (extra->Err.BadAlign.size) {
            if (xml) {
               emit( "  <kind>InvalidSizeAndAlignment</kind>\n" );
               emit( "  <what>Invalid size value: %lu alignment value: %lu%s</what>\n",
                     extra->Err.BadAlign.size,
                     extra->Err.BadAlign.dealloc_align, extra->Err.BadAlign.msg );
               VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            } else {
               emit( "Invalid size value: %lu alignment value: %lu%s\n",
                     extra->Err.BadAlign.size,
                     extra->Err.BadAlign.dealloc_align, extra->Err.BadAlign.msg );
               VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            }
         } else {
            if (xml) {
               emit( "  <kind>InvalidAlignment</kind>\n" );
               emit( "  <what>Invalid alignment value: %lu%s</what>\n",
                     extra->Err.BadAlign.dealloc_align, extra->Err.BadAlign.msg );
               VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            } else {
               emit( "Invalid alignment value: %lu%s\n",
                     extra->Err.BadAlign.dealloc_align, extra->Err.BadAlign.msg );
               VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            }
         }
         break;

      case Err_BadSize:
         if (xml) {
            emit( "  <kind>InvalidSize</kind>\n" );
            emit( "  <what>%s invalid size value: %lu</what>\n",
                  extra->Err.BadSize.func, extra->Err.BadSize.size );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         } else {
            emit( "%s invalid size value: %lu\n",
                  extra->Err.BadSize.func, extra->Err.BadSize.size );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         }
         break;

      case Err_SizeMismatch:
         if (xml) {
            emit( "  <kind>MismatchedAllocateDeallocateSize</kind>\n" );
            emit( "  <what>Mismatched %s size value: %lu</what>\n",
                  extra->Err.SizeMismatch.function_names, extra->Err.SizeMismatch.size );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.SizeMismatch.ai, False);
         } else {
            emit( "Mismatched %s size value: %lu\n",
                  extra->Err.SizeMismatch.function_names, extra->Err.SizeMismatch.size );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.SizeMismatch.ai, False);
         }
         break;
      case Err_AlignMismatch:
         if (xml) {
            emit( "  <kind>MismatchedAllocateDeallocateAlignment</kind>\n" );
            if (extra->Err.AlignMismatch.default_delete) {
               emit( "  <what>Mismatched %s alignment alloc value: %lu dealloc value: default-aligned</what>\n",
                     extra->Err.AlignMismatch.function_names, extra->Err.AlignMismatch.alloc_align );
            } else {
               emit( "  <what>Mismatched %s alignment alloc value: %lu dealloc value: %lu</what>\n",
                     extra->Err.AlignMismatch.function_names, extra->Err.AlignMismatch.alloc_align, extra->Err.AlignMismatch.dealloc_align );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.AlignMismatch.ai, False);
         } else {
            if (extra->Err.AlignMismatch.default_delete) {
               emit( "Mismatched %s alignment alloc value: %lu dealloc value: default-aligned\n",
                     extra->Err.AlignMismatch.function_names, extra->Err.AlignMismatch.alloc_align );
            } else {
               emit( "Mismatched %s alignment alloc value: %lu dealloc value: %lu\n",
                     extra->Err.AlignMismatch.function_names, extra->Err.AlignMismatch.alloc_align, extra->Err.AlignMismatch.dealloc_align );
            }
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
                                &extra->Err.AlignMismatch.ai, False);
         }
         break;
      default:
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in MC_(pp_Error)");
   }
}
/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* These many bytes below %ESP are considered addressable if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   esp -= VG_STACK_REDZONE_SZB;
   if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}
/* --- Called from generated and non-generated code --- */

void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (MC_(in_ignored_range)(a))
      return;

   if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
      return;

   Addr current_sp = VG_(get_SP)(tid);
   just_below_esp = is_just_below_ESP( current_sp, a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   /* Also, if this is caused by an access in the range of offsets
      below the stack pointer as described by
      --ignore-range-below-sp, ignore it. */
   if (MC_(in_ignored_range_below_sp)( current_sp, a, szB ))
      return;

   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}

void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Value.szB       = szB;
   extra.Err.Value.otag      = otag;
   extra.Err.Value.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}

void MC_(record_cond_error) ( ThreadId tid, UInt otag )
{
   MC_Error extra;
   tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.Cond.otag      = otag;
   extra.Err.Cond.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}
/* --- Called from non-generated code --- */

/* This is for memory errors in signal-related memory. */
void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}

void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (otag > 0)
      tl_assert( MC_(clo_mc_level) == 3 );
   extra.Err.RegParam.otag      = otag;
   extra.Err.RegParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}

void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, const HChar* msg, UInt otag )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   if (!isAddrErr)
      tl_assert( MC_(clo_mc_level) >= 2 );
   if (otag != 0) {
      tl_assert( MC_(clo_mc_level) == 3 );
      tl_assert( !isAddrErr );
   }
   extra.Err.MemParam.isAddrErr = isAddrErr;
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;
   extra.Err.MemParam.otag      = otag;
   extra.Err.MemParam.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}

void MC_(record_jump_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Jump.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
}

void MC_(record_free_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Free.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
}

void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.FreeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
   VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
   ai->Addr.Block.freed_at = MC_(freed_at) (mc);
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &extra );
}

void MC_(record_realloc_size_zero) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.ReallocSizeZero.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_ReallocSizeZero, a, /*s*/NULL, &extra );
}

void MC_(record_bad_alignment) ( ThreadId tid, SizeT align, SizeT size, const HChar *msg )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.BadAlign.dealloc_align = align;
   extra.Err.BadAlign.size = size;
   extra.Err.BadAlign.msg = msg;
   VG_(maybe_record_error)( tid, Err_BadAlign, /*addr*/0, /*s*/NULL, &extra );
}

void MC_(record_bad_size) ( ThreadId tid, SizeT size, const HChar *function )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.BadSize.size = size;
   extra.Err.BadSize.func = function;
   VG_(maybe_record_error)( tid, Err_BadSize, /*addr*/0, /*s*/NULL, &extra );
}

void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
}

void MC_(record_overlap_error) ( ThreadId tid, const HChar* function,
                                 Addr src, Addr dst, SizeT szB )
{
   MC_Error extra;
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.Overlap.src = src;
   extra.Err.Overlap.dst = dst;
   extra.Err.Overlap.szB = szB;
   VG_(maybe_record_error)(
      tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
}
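/* For example, the replacement str/mem wrappers report an overlap for a
   call like memcpy(p, p+4, 8), where the 8-byte source and destination
   ranges share 4 bytes.  szB is passed as 0 for functions such as strcpy
   whose length is implicit rather than an explicit argument. */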
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lr,
                              Bool print_record, Bool count_error )
{
   MC_Error extra;
   extra.Err.Leak.n_this_record   = n_this_record;
   extra.Err.Leak.n_total_records = n_total_records;
   extra.Err.Leak.lr              = lr;
   return
   VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
                       lr->key.allocated_at, print_record,
                       /*allow_GDB_attach*/False, count_error );
}

Bool MC_(record_fishy_value_error) ( ThreadId tid, const HChar *function_name,
                                     const HChar *argument_name, SizeT value)
{
   MC_Error extra;

   tl_assert(VG_INVALID_THREADID != tid);

   if ((SSizeT)value >= 0) return False;  // not a fishy value

   extra.Err.FishyValue.function_name = function_name;
   extra.Err.FishyValue.argument_name = argument_name;
   extra.Err.FishyValue.value = value;

   VG_(maybe_record_error)(
      tid, Err_FishyValue, /*addr*/0, /*s*/NULL, &extra );

   return True;
}

void MC_(record_size_mismatch_error) ( ThreadId tid, MC_Chunk* mc, SizeT size, const HChar *function_names)
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.SizeMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
   VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
   ai->Addr.Block.freed_at = MC_(freed_at) (mc);
   extra.Err.SizeMismatch.size = size;
   extra.Err.SizeMismatch.function_names = function_names;
   VG_(maybe_record_error)( tid, Err_SizeMismatch, mc->data, /*s*/NULL,
                            &extra );
}

void MC_(record_align_mismatch_error) ( ThreadId tid, MC_Chunk* mc, SizeT align, Bool default_delete, const HChar *function_names )
{
   MC_Error extra;
   AddrInfo* ai = &extra.Err.AlignMismatch.ai;
   tl_assert(VG_INVALID_THREADID != tid);
   ai->tag = Addr_Block;
   ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   ai->Addr.Block.block_desc = "block";
   ai->Addr.Block.block_szB  = mc->szB;
   ai->Addr.Block.rwoffset   = 0;
   ai->Addr.Block.allocated_at = MC_(allocated_at) (mc);
   VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
   ai->Addr.Block.freed_at = MC_(freed_at) (mc);
   extra.Err.AlignMismatch.alloc_align = mc->alignB;
   extra.Err.AlignMismatch.dealloc_align = align;
   extra.Err.AlignMismatch.default_delete = default_delete;
   extra.Err.AlignMismatch.function_names = function_names;
   VG_(maybe_record_error)( tid, Err_AlignMismatch, mc->data, /*s*/NULL,
                            &extra );
}

void MC_(record_user_error) ( ThreadId tid, Addr a,
                              Bool isAddrErr, UInt otag )
{
   MC_Error extra;
   if (otag != 0) {
      tl_assert(!isAddrErr);
      tl_assert( MC_(clo_mc_level) == 3 );
   }
   if (!isAddrErr) {
      tl_assert( MC_(clo_mc_level) >= 2 );
   }
   tl_assert(VG_INVALID_THREADID != tid);
   extra.Err.User.isAddrErr = isAddrErr;
   extra.Err.User.ai.tag    = Addr_Undescribed;
   extra.Err.User.otag      = otag;
   extra.Err.User.origin_ec = NULL;  /* Filled in later */
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}

Bool MC_(is_mempool_block)(MC_Chunk* mc_search)
{
   MC_Mempool* mp;

   if (!MC_(mempool_list))
      return False;

   // A chunk can only come from a mempool if a custom allocator
   // is used. No search required for other kinds.
   if (mc_search->allockind == MC_AllocCustom) {
      VG_(HT_ResetIter)( MC_(mempool_list) );
      while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (mc == mc_search)
               return True;
         }
      }
   }

   return False;
}
/*------------------------------------------------------------*/
/*--- Other error operations                               ---*/
/*------------------------------------------------------------*/

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  */
Bool MC_(eq_Error) ( VgRes res, const Error* e1, const Error* e2 )
{
   MC_Error* extra1 = VG_(get_error_extra)(e1);
   MC_Error* extra2 = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case Err_CoreMem: {
         const HChar *e1s, *e2s;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;
         if (VG_STREQ(e1s, e2s)) return True;
         return False;
      }

      case Err_RegParam:
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case Err_MemParam:
         if (!VG_STREQ(VG_(get_error_string)(e1),
                       VG_(get_error_string)(e2))) return False;
         // fall through
      case Err_User:
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
                ? True : False );

      case Err_Free:
      case Err_FreeMismatch:
      case Err_Jump:
      case Err_IllegalMempool:
      case Err_Overlap:
      case Err_Cond:
      case Err_ReallocSizeZero:
         return True;

      case Err_FishyValue:
         return VG_STREQ(extra1->Err.FishyValue.function_name,
                         extra2->Err.FishyValue.function_name) &&
                VG_STREQ(extra1->Err.FishyValue.argument_name,
                         extra2->Err.FishyValue.argument_name);

      case Err_Addr:
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
                ? True : False );

      case Err_Value:
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
                ? True : False );

      case Err_BadAlign:
         if (extra1->Err.BadAlign.size &&
             extra2->Err.BadAlign.size) {
            // cases where size should be non-zero or a multiple of alignment
            return extra1->Err.BadAlign.size ==
                   extra2->Err.BadAlign.size
                   &&
                   extra1->Err.BadAlign.dealloc_align ==
                   extra2->Err.BadAlign.dealloc_align;
         } else {
            // alignment that is not a power of 2
            return extra1->Err.BadAlign.dealloc_align ==
                   extra2->Err.BadAlign.dealloc_align;
         }

      case Err_BadSize:
         // sized delete mismatch
         return extra1->Err.BadSize.size ==
                extra2->Err.BadSize.size;

      case Err_SizeMismatch:
         return extra1->Err.SizeMismatch.size ==
                extra2->Err.SizeMismatch.size;

      case Err_AlignMismatch:
         // alignments both powers of 2 but different
         return extra1->Err.AlignMismatch.alloc_align ==
                extra2->Err.AlignMismatch.alloc_align
                &&
                extra1->Err.AlignMismatch.dealloc_align ==
                extra2->Err.AlignMismatch.dealloc_align
                &&
                extra1->Err.AlignMismatch.default_delete ==
                extra2->Err.AlignMismatch.default_delete;

      case Err_Leak:
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
/* Functions used when searching MC_Chunk lists */
static
Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 MC_(Malloc_Redzone_SzB) );
}
static
Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
{
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 rzB );
}

// Forward declarations
static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
                                          AddrInfo* ai );
/* Describe an address as best you can, for error messages,
   putting the result in ai. */
static void describe_addr ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
{
   MC_Chunk*  mc;

   tl_assert(Addr_Undescribed == ai->tag);

   /* -- Perhaps it's a user-named block? -- */
   if (client_block_maybe_describe( a, ai )) {
      return;
   }

   /* -- Perhaps it's in mempool block (non-meta)? -- */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ False, ai)) {
      return;
   }

   /* Blocks allocated by memcheck malloc functions are either
      on the recently freed list or on the malloc-ed list.
      Custom blocks can be on both : a recently freed block might
      have been just re-allocated.
      So, first search the malloc-ed block, as the most recent
      block is the probable cause of error.
      We however detect and report that this is a recently re-allocated
      block. */
   /* -- Search for a currently malloc'd block which might bracket it. -- */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (!MC_(is_mempool_block)(mc) &&
          addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         if (MC_(get_freed_block_bracketting)( a ))
            ai->Addr.Block.block_desc = "recently re-allocated block";
         else
            ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
         ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
         VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
         ai->Addr.Block.freed_at = MC_(freed_at)(mc);
         return;
      }
   }

   /* -- Search for a recently freed block which might bracket it. -- */
   mc = MC_(get_freed_block_bracketting)( a );
   if (mc) {
      ai->tag = Addr_Block;
      ai->Addr.Block.block_kind = Block_Freed;
      ai->Addr.Block.block_desc = "block";
      ai->Addr.Block.block_szB  = mc->szB;
      ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
      ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
      VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
      ai->Addr.Block.freed_at = MC_(freed_at)(mc);
      return;
   }

   /* -- Perhaps it's in a meta mempool block? -- */
   /* This test is done last, because metapool blocks overlap with blocks
      handed out to the application. That makes every heap address part of
      a metapool block, so the interesting cases are handled first.
      This final search is a last-ditch attempt. When found, it is probably
      an error in the custom allocator itself. */
   if (mempool_block_maybe_describe( a, /*is_metapool*/ True, ai )) {
      return;
   }

   /* No block found. Search a non-heap block description. */
   VG_(describe_addr) (ep, a, ai);
}

void MC_(pp_describe_addr) ( DiEpoch ep, Addr a )
{
   AddrInfo ai;

   ai.tag = Addr_Undescribed;
   describe_addr (ep, a, &ai);
   VG_(pp_addrinfo_mc) (a, &ai, /* maybe_gcc */ False);
   VG_(clear_addrinfo) (&ai);
}
/* Fill in *origin_ec as specified by otag, or NULL it out if otag
   does not refer to a known origin. */
static void update_origin ( /*OUT*/ExeContext** origin_ec,
                            UInt otag )
{
   UInt ecu = otag & ~3;
   *origin_ec = NULL;
   if (VG_(is_plausible_ECU)(ecu)) {
      *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
   }
}
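/* As the masking above suggests, an origin tag packs two fields: the low
   two bits carry the origin kind (the MC_OKIND_* values recovered with
   'otag & 3' in mc_pp_origin), and the remaining bits carry the ECU used
   here to look up the origin's ExeContext. */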
/* Updates the copy with address info if necessary (but not for all errors). */
UInt MC_(update_Error_extra)( const Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);
   DiEpoch   ep    = VG_(get_ExeContext_epoch)(VG_(get_error_where)(err));

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   //case Err_Value:
   //case Err_Cond:
   case Err_Overlap:
   case Err_FishyValue:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so the 'extra' is not copied.  But
   // we make it consistent with the others.
   case Err_Leak:
   case Err_BadAlign:
   case Err_BadSize:
   case Err_SizeMismatch:
   case Err_AlignMismatch:
      return sizeof(MC_Error);

   // For value errors, get the ExeContext corresponding to the
   // origin tag.  Note that it is a kludge to assume that
   // a length-1 trace indicates a stack origin.  FIXME.
   case Err_Value:
      update_origin( &extra->Err.Value.origin_ec,
                     extra->Err.Value.otag );
      return sizeof(MC_Error);
   case Err_Cond:
      update_origin( &extra->Err.Cond.origin_ec,
                     extra->Err.Cond.otag );
      return sizeof(MC_Error);
   case Err_RegParam:
      update_origin( &extra->Err.RegParam.origin_ec,
                     extra->Err.RegParam.otag );
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.MemParam.ai );
      update_origin( &extra->Err.MemParam.origin_ec,
                     extra->Err.MemParam.otag );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.User.ai );
      update_origin( &extra->Err.User.origin_ec,
                     extra->Err.User.otag );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_block_maybe_describe( VG_(get_error_address)(err),
                                         &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }
   case Err_ReallocSizeZero:
      describe_addr ( ep, VG_(get_error_address)(err),
                      &extra->Err.ReallocSizeZero.ai );
      return sizeof(MC_Error);

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
static Bool client_block_maybe_describe( Addr a,
                                         /*OUT*/AddrInfo* ai )
{
   UWord      i;
   CGenBlock* cgbs     = NULL;
   UWord      cgb_used = 0;

   MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
   if (cgbs == NULL)
      tl_assert(cgb_used == 0);

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_UserG;
         ai->Addr.Block.block_desc = cgbs[i].desc;
         ai->Addr.Block.block_szB  = cgbs[i].size;
         ai->Addr.Block.rwoffset   = (Word)(a) - (Word)(cgbs[i].start);
         ai->Addr.Block.allocated_at = cgbs[i].where;
         VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
         ai->Addr.Block.freed_at = VG_(null_ExeContext)();
         return True;
      }
   }
   return False;
}
static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
                                          /*OUT*/AddrInfo* ai )
{
   MC_Mempool* mp;
   tl_assert( MC_(mempool_list) );

   VG_(HT_ResetIter)( MC_(mempool_list) );
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      if (mp->chunks != NULL && mp->metapool == is_metapool) {
         MC_Chunk* mc;
         VG_(HT_ResetIter)(mp->chunks);
         while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
            if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
               ai->tag = Addr_Block;
               ai->Addr.Block.block_kind = Block_MempoolChunk;
               ai->Addr.Block.block_desc = "block";
               ai->Addr.Block.block_szB  = mc->szB;
               ai->Addr.Block.rwoffset   = (Word)a - (Word)mc->data;
               ai->Addr.Block.allocated_at = MC_(allocated_at)(mc);
               VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
               ai->Addr.Block.freed_at = MC_(freed_at)(mc);
               return True;
            }
         }
      }
   }
   return False;
}
/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp, Value32Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp, Addr32Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
      FishyValueSupp,// Fishy value suppression.
      ReallocSizeZeroSupp, // realloc size 0 suppression
      BadAlignSupp,  // Alignment not a power of 2
      BadSizeSupp,   // aligned alloc with size 0
      SizeMismatch,  // Sized deallocation did not match allocation size
      AlignMismatch, // Aligned deallocation did not match aligned allocation
   }
   MC_SuppKind;
Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su )
{
   SuppKind skind;

   if      (VG_STREQ(name, "Param"))   skind = ParamSupp;
   else if (VG_STREQ(name, "User"))    skind = UserSupp;
   else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
   else if (VG_STREQ(name, "Addr1"))   skind = Addr1Supp;
   else if (VG_STREQ(name, "Addr2"))   skind = Addr2Supp;
   else if (VG_STREQ(name, "Addr4"))   skind = Addr4Supp;
   else if (VG_STREQ(name, "Addr8"))   skind = Addr8Supp;
   else if (VG_STREQ(name, "Addr16"))  skind = Addr16Supp;
   else if (VG_STREQ(name, "Addr32"))  skind = Addr32Supp;
   else if (VG_STREQ(name, "Jump"))    skind = JumpSupp;
   else if (VG_STREQ(name, "Free"))    skind = FreeSupp;
   else if (VG_STREQ(name, "Leak"))    skind = LeakSupp;
   else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
   else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
   else if (VG_STREQ(name, "Cond"))    skind = CondSupp;
   else if (VG_STREQ(name, "Value0"))  skind = CondSupp; /* backwards compat */
   else if (VG_STREQ(name, "Value1"))  skind = Value1Supp;
   else if (VG_STREQ(name, "Value2"))  skind = Value2Supp;
   else if (VG_STREQ(name, "Value4"))  skind = Value4Supp;
   else if (VG_STREQ(name, "Value8"))  skind = Value8Supp;
   else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
   else if (VG_STREQ(name, "Value32")) skind = Value32Supp;
   else if (VG_STREQ(name, "FishyValue")) skind = FishyValueSupp;
   else if (VG_STREQ(name, "ReallocZero")) skind = ReallocSizeZeroSupp;
   else if (VG_STREQ(name, "BadAlign")) skind = BadAlignSupp;
   else if (VG_STREQ(name, "BadSize")) skind = BadSizeSupp;
   else if (VG_STREQ(name, "SizeMismatch")) skind = SizeMismatch;
   else if (VG_STREQ(name, "AlignMismatch")) skind = AlignMismatch;
   else
      return False;

   VG_(set_supp_kind)(su, skind);
   return True;
}
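/* For reference, these names are what appears after "Memcheck:" on the
   second line of an entry in a suppression file.  An illustrative entry
   (the suppression name here is invented) for an 8-byte unaddressable
   read inside __wcsncpy_avx2 would be:

      {
         wcsncpy-avx2-false-positive
         Memcheck:Addr8
         fun:__wcsncpy_avx2
         ...
      }

   "Memcheck:Addr8" maps to Addr8Supp above; the remaining lines give the
   calling context the error must match, with "..." matching any number
   of frames. */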
typedef struct _MC_LeakSuppExtra MC_LeakSuppExtra;

struct _MC_LeakSuppExtra {
   UInt match_leak_kinds;
   UInt leak_search_gen;

   /* Maintains nr of blocks and bytes suppressed with this suppression
      during the leak search identified by leak_search_gen.
      blocks_suppressed and bytes_suppressed are reset to 0 when
      used the first time during a leak search. */
   SizeT blocks_suppressed;
   SizeT bytes_suppressed;
};

typedef struct {
   const HChar *function_name;
   const HChar *argument_name;
} MC_FishyValueExtra;
Bool MC_(read_extra_suppression_info) ( Int fd, HChar** bufpp,
                                        SizeT* nBufp, Int* lineno, Supp *su )
{
   Bool eof;
   Int i;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
      if (VG_(strcmp) (*bufpp, "preadv(vector[...])") == 0
          || VG_(strcmp) (*bufpp, "pwritev(vector[...])") == 0) {
         /* Report the incompatible change introduced in 3.15
            when reading an unsupported 3.14-or-earlier entry.
            See bug 417075. */
         VG_(umsg)("WARNING: %s is an obsolete suppression line "
                   "not supported in valgrind 3.15 or later.\n"
                   "You should replace [...] by a specific index"
                   " such as [0] or [1] or [2] or similar\n\n", *bufpp);
      }
   } else if (VG_(get_supp_kind)(su) == LeakSupp) {
      // We might have the optional match-leak-kinds line
      MC_LeakSuppExtra* lse;
      lse = VG_(malloc)("mc.resi.2", sizeof(MC_LeakSuppExtra));
      lse->match_leak_kinds = MC_(all_Reachedness)();
      lse->blocks_suppressed = 0;
      lse->bytes_suppressed = 0;
      lse->leak_search_gen = 0;
      VG_(set_supp_extra)(su, lse); // By default, all kinds will match.
      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True; // old LeakSupp style, no match-leak-kinds line.
      if (0 == VG_(strncmp)(*bufpp, "match-leak-kinds:", 17)) {
         i = 17;
         while ((*bufpp)[i] && VG_(isspace)((*bufpp)[i]))
            i++;
         if (!VG_(parse_enum_set)(MC_(parse_leak_kinds_tokens),
                                  True/*allow_all*/,
                                  (*bufpp)+i, &lse->match_leak_kinds)) {
            return False;
         }
      } else {
         return False; // unknown extra line.
      }
   } else if (VG_(get_supp_kind)(su) == FishyValueSupp) {
      MC_FishyValueExtra *extra;
      HChar *p, *function_name, *argument_name = NULL;

      eof = VG_(get_line) ( fd, bufpp, nBufp, lineno );
      if (eof) return True;

      // The suppression string is: function_name(argument_name)
      function_name = VG_(strdup)("mv.resi.4", *bufpp);
      p = VG_(strchr)(function_name, '(');
      if (p != NULL) {
         *p++ = '\0';
         argument_name = p;
         p = VG_(strchr)(p, ')');
         if (p != NULL)
            *p = '\0';
      }
      if (p == NULL) {    // malformed suppression string
         VG_(free)(function_name);
         return False;
      }

      extra = VG_(malloc)("mc.resi.3", sizeof *extra);
      extra->function_name = function_name;
      extra->argument_name = argument_name;

      VG_(set_supp_extra)(su, extra);
   }
   return True;
}
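/* For reference, the extra lines parsed above look like this in a
   suppression file (illustrative entries; the suppression names are
   invented):

      {
         ignore-cached-singleton
         Memcheck:Leak
         match-leak-kinds: definite,indirect
         fun:malloc
         ...
      }

      {
         ignore-fishy-size-arg
         Memcheck:FishyValue
         malloc(size)
         ...
      }
*/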
Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   Int       su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind)(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return ((ekind == Err_RegParam || ekind == Err_MemParam)
                && VG_STREQ(VG_(get_error_string)(err),
                            VG_(get_supp_string)(su)));

      case UserSupp:
         return (ekind == Err_User);

      case CoreMemSupp:
         return (ekind == Err_CoreMem
                && VG_STREQ(VG_(get_error_string)(err),
                            VG_(get_supp_string)(su)));

      case Value1Supp: su_szB = 1; goto value_case;
      case Value2Supp: su_szB = 2; goto value_case;
      case Value4Supp: su_szB = 4; goto value_case;
      case Value8Supp: su_szB = 8; goto value_case;
      case Value16Supp:su_szB =16; goto value_case;
      case Value32Supp:su_szB =32; goto value_case;
      value_case:
         return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

      case CondSupp:
         return (ekind == Err_Cond);

      case Addr1Supp: su_szB = 1; goto addr_case;
      case Addr2Supp: su_szB = 2; goto addr_case;
      case Addr4Supp: su_szB = 4; goto addr_case;
      case Addr8Supp: su_szB = 8; goto addr_case;
      case Addr16Supp:su_szB =16; goto addr_case;
      case Addr32Supp:su_szB =32; goto addr_case;
      addr_case:
         return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

      case JumpSupp:
         return (ekind == Err_Jump);

      case FreeSupp:
         return (ekind == Err_Free || ekind == Err_FreeMismatch);

      case OverlapSupp:
         return (ekind == Err_Overlap);

      case LeakSupp:
         if (ekind == Err_Leak) {
            MC_LeakSuppExtra* lse = (MC_LeakSuppExtra*) VG_(get_supp_extra)(su);
            if (lse->leak_search_gen != MC_(leak_search_gen)) {
               // First time we see this suppression during this leak search.
               // => reset the counters to 0.
               lse->blocks_suppressed = 0;
               lse->bytes_suppressed = 0;
               lse->leak_search_gen = MC_(leak_search_gen);
            }
            return RiS(extra->Err.Leak.lr->key.state, lse->match_leak_kinds);
         } else
            return False;

      case MempoolSupp:
         return (ekind == Err_IllegalMempool);

      case FishyValueSupp: {
         MC_FishyValueExtra *supp_extra = VG_(get_supp_extra)(su);

         return (ekind == Err_FishyValue) &&
                VG_STREQ(extra->Err.FishyValue.function_name,
                         supp_extra->function_name) &&
                VG_STREQ(extra->Err.FishyValue.argument_name,
                         supp_extra->argument_name);
      }

      case ReallocSizeZeroSupp:
         return (ekind == Err_ReallocSizeZero);

      case BadAlignSupp:
         return (ekind == Err_BadAlign);

      case BadSizeSupp:
         return (ekind == Err_BadSize);

      case SizeMismatch:
         return (ekind == Err_SizeMismatch);

      case AlignMismatch:
         return (ekind == Err_AlignMismatch);

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(tool_panic)("unknown suppression type in "
                         "MC_(error_matches_suppression)");
   }
}
const HChar* MC_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   case Err_RegParam:       return "Param";
   case Err_MemParam:       return "Param";
   case Err_User:           return "User";
   case Err_FreeMismatch:   return "Free";
   case Err_IllegalMempool: return "Mempool";
   case Err_Free:           return "Free";
   case Err_Jump:           return "Jump";
   case Err_CoreMem:        return "CoreMem";
   case Err_Overlap:        return "Overlap";
   case Err_Leak:           return "Leak";
   case Err_Cond:           return "Cond";
   case Err_FishyValue:     return "FishyValue";
   case Err_ReallocSizeZero: return "ReallocZero";
   case Err_BadAlign:       return "BadAlign";
   case Err_BadSize:        return "BadSize";
   case Err_SizeMismatch:   return "SizeMismatch";
   case Err_AlignMismatch:  return "AlignMismatch";
   case Err_Addr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Addr.szB ) {
      case 1:               return "Addr1";
      case 2:               return "Addr2";
      case 4:               return "Addr4";
      case 8:               return "Addr8";
      case 16:              return "Addr16";
      case 32:              return "Addr32";
      default: VG_(tool_panic)("unexpected size for Addr");
      }
   }
   case Err_Value: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Value.szB ) {
      case 1:               return "Value1";
      case 2:               return "Value2";
      case 4:               return "Value4";
      case 8:               return "Value8";
      case 16:              return "Value16";
      case 32:              return "Value32";
      default: VG_(tool_panic)("unexpected size for Value");
      }
   }
   default: VG_(tool_panic)("get_error_name: unexpected type");
   }
}
SizeT MC_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   ErrorKind ekind = VG_(get_error_kind)(err);
   tl_assert(buf);
   tl_assert(nBuf >= 1);

   if (Err_RegParam == ekind || Err_MemParam == ekind) {
      const HChar* errstr = VG_(get_error_string)(err);
      tl_assert(errstr);
      return VG_(snprintf)(buf, nBuf, "%s", errstr);
   } else if (Err_Leak == ekind) {
      MC_Error* extra = VG_(get_error_extra)(err);
      return VG_(snprintf) (buf, nBuf, "match-leak-kinds: %s",
          pp_Reachedness_for_leak_kinds(extra->Err.Leak.lr->key.state));
   } else if (Err_FishyValue == ekind) {
      MC_Error* extra = VG_(get_error_extra)(err);
      return VG_(snprintf) (buf, nBuf, "%s(%s)",
                            extra->Err.FishyValue.function_name,
                            extra->Err.FishyValue.argument_name);
   } else {
      buf[0] = '\0';
      return 0;
   }
}

SizeT MC_(print_extra_suppression_use) ( const Supp *su,
                                         /*OUT*/HChar *buf, Int nBuf )
{
   tl_assert(nBuf >= 1);

   if (VG_(get_supp_kind)(su) == LeakSupp) {
      MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);

      if (lse->leak_search_gen == MC_(leak_search_gen)
          && lse->blocks_suppressed > 0) {
         return VG_(snprintf) (buf, nBuf,
                               "suppressed: %'lu bytes in %'lu blocks",
                               lse->bytes_suppressed,
                               lse->blocks_suppressed);
      }
   }

   buf[0] = '\0';
   return 0;
}

void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su)
{
   if (VG_(get_supp_kind)(su) == LeakSupp) {
      MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
      MC_Error* extra = VG_(get_error_extra)(err);

      tl_assert (lse->leak_search_gen == MC_(leak_search_gen));
      lse->blocks_suppressed += extra->Err.Leak.lr->num_blocks;
      lse->bytes_suppressed
         += extra->Err.Leak.lr->szB + extra->Err.Leak.lr->indirect_szB;
   }
}
/*--------------------------------------------------------------------*/
/*--- end                                              mc_errors.c ---*/
/*--------------------------------------------------------------------*/