drd/drd_load_store.c (valgrind.git)

/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/

#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining.                                                    */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"

/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_arm64)
#define STACK_POINTER_OFFSET OFFSET_arm64_XSP
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
#elif defined(VGA_tilegx)
#define STACK_POINTER_OFFSET OFFSET_tilegx_r54
#else
#error Unknown architecture.
#endif
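
/*
 * Note: STACK_POINTER_OFFSET is compared against the offset of Iex_Get
 * expressions in is_stack_access() below, so that accesses whose address is
 * read directly from the guest stack pointer can be skipped when stack
 * accesses are not being checked (--check-stack-var=no).
 */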

/* Local variables. */

static Bool s_check_stack_accesses = False;
static Bool s_first_race_only = False;

/* Function definitions. */

Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      HChar* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %ld/0x%lx (thread %d /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %lld/0x%llx (thread %d"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end  " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}
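
/*
 * Note: DRD_(trace_mem_access)() above only prints something for addresses
 * in a range for which tracing has been requested (DRD_(is_any_traced)()),
 * i.e. the --trace-addr machinery referred to in the instr_trace_mem_load()
 * and instr_trace_mem_store() comments further down.
 */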

static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
}

static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size,
                                              const HWord stored_value_hi,
                                              const HWord stored_value_lo)
{
   return DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
                                 stored_value_lo);
}

static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#if 0
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}
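
/*
 * Note: when stack accesses are not being checked (s_check_stack_accesses is
 * False by default), races on addresses that lie on any thread stack are
 * silently ignored by drd_report_race(); the #if 0 block above shows a
 * disabled informational message for that case.
 */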

VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is expensive, hence it is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is expensive, hence it is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1] = { [4] = Iop_1Uto32,  [8] = Iop_1Uto64 },
   [Ity_I8  - Ity_I1] = { [4] = Iop_8Uto32,  [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};
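
/*
 * The table above is indexed as u_widen_irop[type - Ity_I1][sizeof(HWord)]:
 * the first index selects the integer source type and the second index is
 * the host word size in bytes (4 or 8), yielding the IROp that unsigned-widens
 * that type to a host word. Entries that are not filled in remain zero and
 * are treated as Iop_INVALID by instr_trace_mem_store() below.
 */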

/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size,
                                    IRExpr* const guard/* NULL => True */)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/2,
                          "drd_trace_mem_load",
                          VG_(fnptr_to_fnentry)
                          (drd_trace_mem_load),
                          mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));

   return addr_expr;
}
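
/*
 * Note: instr_trace_mem_load() returns the IRExpr_RdTmp() that now holds the
 * address, so callers can pass that temporary on to instrument_load() instead
 * of re-evaluating the original address expression.
 */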

/**
 * Instrument the client code to trace a memory store (--trace-addr).
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo,
                                  IRExpr* const guard/* NULL => True */)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/3,
                          "drd_trace_mem_store",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                          mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                        data_expr_hi ? data_expr_hi
                                        : mkIRExpr_HWord(0), data_expr_lo));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di) );
}
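
/*
 * Note: drd_trace_mem_store() receives the stored value as two host words.
 * On 64-bit hosts the value fits in stored_value_lo; on 32-bit hosts a 64-bit
 * store is split above with Iop_64HIto32/Iop_64to32 so that the upper half
 * travels in stored_value_hi. Values that cannot be represented this way are
 * passed as 0, in which case only the address and size are traced.
 */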

static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size,
                            IRExpr* const guard/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
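
/*
 * The switch above specializes the common access sizes (1, 2, 4 and 8 bytes)
 * to dedicated single-argument helpers so that the size does not have to be
 * passed at run time; all other sizes fall back to the generic
 * DRD_(trace_load)(addr, size) helper. instrument_store() below follows the
 * same pattern for stores.
 */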

static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
                             IRExpr* const data_expr,
                             IRExpr* const guard_expr/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;
   HWord size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)())) {
      IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
      addr_expr = IRExpr_RdTmp(tmp);
      instr_trace_mem_store(bb, addr_expr, NULL, data_expr, guard_expr);
   }

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   if (guard_expr) di->guard = guard_expr;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
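
/*
 * DRD_(instrument)() below is the tool's main instrumentation callback: it
 * copies the incoming superblock bb_in statement by statement and, for every
 * statement that can touch client memory, first emits a call to one of the
 * trace/load/store helpers above before copying the original statement.
 */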

IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       const VexGuestLayout* const layout,
                       const VexGuestExtents* const vge,
                       const VexArchInfo* archinfo_host,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int i;
   IRSB* bb;
   IRExpr** argv;
   Bool instrument = True;

   /* Set up BB */
   bb           = emptyIRSB();
   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next     = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP   = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
         /* Note: the code for not instrumenting the code in .plt          */
         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21    */
         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).             */
         /* This is because on this platform dynamic library symbols are   */
         /* relocated in another way than by later binutils versions. The  */
         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting to DRD */
         case Imbe_CancelReservation:
            break; /* not interesting to DRD */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data,
                             NULL/* no guard */);
         addStmtToIRSB(bb, st);
         break;

      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         IRExpr* data = sg->data;
         IRExpr* addr = sg->addr;
         if (instrument)
            instrument_store(bb, addr, data, sg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         IRType type = Ity_INVALID; /* loaded type */
         IRType typeWide = Ity_INVALID; /* after implicit widening */
         IRExpr* addr_expr = lg->addr;
         typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
         tl_assert(type != Ity_INVALID);
         if (UNLIKELY(DRD_(any_address_is_traced)())) {
            addr_expr = instr_trace_mem_load(bb, addr_expr,
                                             sizeofIRType(type), lg->guard);
         }
         instrument_load(bb, lg->addr,
                         sizeofIRType(type), lg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            IRExpr* addr_expr = data->Iex.Load.addr;
            if (data->tag == Iex_Load) {
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(data->Iex.Load.ty),
                                                   NULL/* no guard */);
               }
               instrument_load(bb, addr_expr, sizeofIRType(data->Iex.Load.ty),
                               NULL/* no guard */);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo,
                                     NULL/* no guard */);

            instrument_load(bb, cas->addr, dataSize, NULL/*no guard*/);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy),
                                                   NULL /* no guard */);

               instrument_load(bb, addr_expr, sizeofIRType(dataTy),
                               NULL/*no guard*/);
            }
         } else {
            /* SC */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                  st->Ist.LLSC.storedata,
                                  NULL/* no guard */);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}