2 This file is part of drd, a thread error detector.
4 Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
21 The GNU General Public License is contained in the file COPYING.
25 #include "drd_bitmap.h"
26 #include "drd_thread_bitmap.h"
27 #include "drd_vc.h" /* DRD_(vc_snprint)() */
29 /* Include several source files here in order to allow the compiler to */
30 /* do more inlining. */
31 #include "drd_bitmap.c"
32 #include "drd_load_store.h"
33 #include "drd_segment.c"
34 #include "drd_thread.c"
36 #include "libvex_guest_offsets.h"
39 /* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
41 #define STACK_POINTER_OFFSET OFFSET_x86_ESP
42 #elif defined(VGA_amd64)
43 #define STACK_POINTER_OFFSET OFFSET_amd64_RSP
44 #elif defined(VGA_ppc32)
45 #define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
46 #elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
47 #define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
48 #elif defined(VGA_arm)
49 #define STACK_POINTER_OFFSET OFFSET_arm_R13
50 #elif defined(VGA_arm64)
51 #define STACK_POINTER_OFFSET OFFSET_arm64_XSP
52 #elif defined(VGA_s390x)
53 #define STACK_POINTER_OFFSET OFFSET_s390x_r15
54 #elif defined(VGA_mips32)
55 #define STACK_POINTER_OFFSET OFFSET_mips32_r29
56 #elif defined(VGA_mips64)
57 #define STACK_POINTER_OFFSET OFFSET_mips64_r29
58 #elif defined(VGA_tilegx)
59 #define STACK_POINTER_OFFSET OFFSET_tilegx_r54
61 #error Unknown architecture.
65 /* Local variables. */
67 static Bool s_check_stack_accesses
= False
;
68 static Bool s_first_race_only
= False
;
71 /* Function definitions. */
73 Bool
DRD_(get_check_stack_accesses
)()
75 return s_check_stack_accesses
;
78 void DRD_(set_check_stack_accesses
)(const Bool c
)
80 tl_assert(c
== False
|| c
== True
);
81 s_check_stack_accesses
= c
;
84 Bool
DRD_(get_first_race_only
)()
86 return s_first_race_only
;
89 void DRD_(set_first_race_only
)(const Bool fro
)
91 tl_assert(fro
== False
|| fro
== True
);
92 s_first_race_only
= fro
;
95 void DRD_(trace_mem_access
)(const Addr addr
, const SizeT size
,
96 const BmAccessTypeT access_type
,
97 const HWord stored_value_hi
,
98 const HWord stored_value_lo
)
100 if (DRD_(is_any_traced
)(addr
, addr
+ size
))
104 vc
= DRD_(vc_aprint
)(DRD_(thread_get_vc
)(DRD_(thread_get_running_tid
)()));
105 if (access_type
== eStore
&& size
<= sizeof(HWord
)) {
106 DRD_(trace_msg_w_bt
)("store 0x%lx size %ld val %ld/0x%lx (thread %d /"
107 " vc %s)", addr
, size
, stored_value_lo
,
108 stored_value_lo
, DRD_(thread_get_running_tid
)(),
110 } else if (access_type
== eStore
&& size
> sizeof(HWord
)) {
113 tl_assert(sizeof(HWord
) == 4);
114 sv
= ((ULong
)stored_value_hi
<< 32) | stored_value_lo
;
115 DRD_(trace_msg_w_bt
)("store 0x%lx size %ld val %lld/0x%llx (thread %d"
116 " / vc %s)", addr
, size
, sv
, sv
,
117 DRD_(thread_get_running_tid
)(), vc
);
119 DRD_(trace_msg_w_bt
)("%s 0x%lx size %ld (thread %d / vc %s)",
120 access_type
== eLoad
? "load "
121 : access_type
== eStore
? "store"
122 : access_type
== eStart
? "start"
123 : access_type
== eEnd
? "end " : "????",
124 addr
, size
, DRD_(thread_get_running_tid
)(), vc
);
127 tl_assert(DRD_(DrdThreadIdToVgThreadId
)(DRD_(thread_get_running_tid
)())
128 == VG_(get_running_tid
)());
132 static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr
, const SizeT size
)
134 return DRD_(trace_mem_access
)(addr
, size
, eLoad
, 0, 0);
137 static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr
,const SizeT size
,
138 const HWord stored_value_hi
,
139 const HWord stored_value_lo
)
141 return DRD_(trace_mem_access
)(addr
, size
, eStore
, stored_value_hi
,
145 static void drd_report_race(const Addr addr
, const SizeT size
,
146 const BmAccessTypeT access_type
)
150 vg_tid
= VG_(get_running_tid
)();
151 if (!DRD_(get_check_stack_accesses
)()
152 && DRD_(thread_address_on_any_stack
)(addr
)) {
154 GenericErrInfo GEI
= {
155 .tid
= DRD_(thread_get_running_tid
)(),
158 VG_(maybe_record_error
)(vg_tid
, GenericErr
, VG_(get_IP
)(vg_tid
),
159 "--check-stack-var=no skips checking stack"
160 " variables shared over threads",
164 DataRaceErrInfo drei
= {
165 .tid
= DRD_(thread_get_running_tid
)(),
168 .access_type
= access_type
,
170 VG_(maybe_record_error
)(vg_tid
, DataRaceErr
, VG_(get_IP
)(vg_tid
),
171 "Conflicting access", &drei
);
173 if (s_first_race_only
)
174 DRD_(start_suppression
)(addr
, addr
+ size
, "first race only");
178 VG_REGPARM(2) void DRD_(trace_load
)(Addr addr
, SizeT size
)
180 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
181 /* The assert below has been commented out because of performance reasons.*/
182 tl_assert(DRD_(thread_get_running_tid
)()
183 == DRD_(VgThreadIdToDrdThreadId
)(VG_(get_running_tid())));
186 if (DRD_(running_thread_is_recording_loads
)()
187 && (s_check_stack_accesses
188 || ! DRD_(thread_address_on_stack
)(addr
))
189 && bm_access_load_triggers_conflict(addr
, addr
+ size
)
190 && ! DRD_(is_suppressed
)(addr
, addr
+ size
))
192 drd_report_race(addr
, size
, eLoad
);
196 static VG_REGPARM(1) void drd_trace_load_1(Addr addr
)
198 if (DRD_(running_thread_is_recording_loads
)()
199 && (s_check_stack_accesses
200 || ! DRD_(thread_address_on_stack
)(addr
))
201 && bm_access_load_1_triggers_conflict(addr
)
202 && ! DRD_(is_suppressed
)(addr
, addr
+ 1))
204 drd_report_race(addr
, 1, eLoad
);
208 static VG_REGPARM(1) void drd_trace_load_2(Addr addr
)
210 if (DRD_(running_thread_is_recording_loads
)()
211 && (s_check_stack_accesses
212 || ! DRD_(thread_address_on_stack
)(addr
))
213 && bm_access_load_2_triggers_conflict(addr
)
214 && ! DRD_(is_suppressed
)(addr
, addr
+ 2))
216 drd_report_race(addr
, 2, eLoad
);
220 static VG_REGPARM(1) void drd_trace_load_4(Addr addr
)
222 if (DRD_(running_thread_is_recording_loads
)()
223 && (s_check_stack_accesses
224 || ! DRD_(thread_address_on_stack
)(addr
))
225 && bm_access_load_4_triggers_conflict(addr
)
226 && ! DRD_(is_suppressed
)(addr
, addr
+ 4))
228 drd_report_race(addr
, 4, eLoad
);
232 static VG_REGPARM(1) void drd_trace_load_8(Addr addr
)
234 if (DRD_(running_thread_is_recording_loads
)()
235 && (s_check_stack_accesses
236 || ! DRD_(thread_address_on_stack
)(addr
))
237 && bm_access_load_8_triggers_conflict(addr
)
238 && ! DRD_(is_suppressed
)(addr
, addr
+ 8))
240 drd_report_race(addr
, 8, eLoad
);
244 VG_REGPARM(2) void DRD_(trace_store
)(Addr addr
, SizeT size
)
246 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
247 /* The assert below has been commented out because of performance reasons.*/
248 tl_assert(DRD_(thread_get_running_tid
)()
249 == DRD_(VgThreadIdToDrdThreadId
)(VG_(get_running_tid())));
252 if (DRD_(running_thread_is_recording_stores
)()
253 && (s_check_stack_accesses
254 || ! DRD_(thread_address_on_stack
)(addr
))
255 && bm_access_store_triggers_conflict(addr
, addr
+ size
)
256 && ! DRD_(is_suppressed
)(addr
, addr
+ size
))
258 drd_report_race(addr
, size
, eStore
);
262 static VG_REGPARM(1) void drd_trace_store_1(Addr addr
)
264 if (DRD_(running_thread_is_recording_stores
)()
265 && (s_check_stack_accesses
266 || ! DRD_(thread_address_on_stack
)(addr
))
267 && bm_access_store_1_triggers_conflict(addr
)
268 && ! DRD_(is_suppressed
)(addr
, addr
+ 1))
270 drd_report_race(addr
, 1, eStore
);
274 static VG_REGPARM(1) void drd_trace_store_2(Addr addr
)
276 if (DRD_(running_thread_is_recording_stores
)()
277 && (s_check_stack_accesses
278 || ! DRD_(thread_address_on_stack
)(addr
))
279 && bm_access_store_2_triggers_conflict(addr
)
280 && ! DRD_(is_suppressed
)(addr
, addr
+ 2))
282 drd_report_race(addr
, 2, eStore
);
286 static VG_REGPARM(1) void drd_trace_store_4(Addr addr
)
288 if (DRD_(running_thread_is_recording_stores
)()
289 && (s_check_stack_accesses
290 || !DRD_(thread_address_on_stack
)(addr
))
291 && bm_access_store_4_triggers_conflict(addr
)
292 && !DRD_(is_suppressed
)(addr
, addr
+ 4))
294 drd_report_race(addr
, 4, eStore
);
298 static VG_REGPARM(1) void drd_trace_store_8(Addr addr
)
300 if (DRD_(running_thread_is_recording_stores
)()
301 && (s_check_stack_accesses
302 || ! DRD_(thread_address_on_stack
)(addr
))
303 && bm_access_store_8_triggers_conflict(addr
)
304 && ! DRD_(is_suppressed
)(addr
, addr
+ 8))
306 drd_report_race(addr
, 8, eStore
);
311 * Return true if and only if addr_expr matches the pattern (SP) or
314 static Bool
is_stack_access(IRSB
* const bb
, IRExpr
* const addr_expr
)
318 if (addr_expr
->tag
== Iex_RdTmp
)
321 for (i
= 0; i
< bb
->stmts_used
; i
++)
324 && bb
->stmts
[i
]->tag
== Ist_WrTmp
325 && bb
->stmts
[i
]->Ist
.WrTmp
.tmp
== addr_expr
->Iex
.RdTmp
.tmp
)
327 IRExpr
* e
= bb
->stmts
[i
]->Ist
.WrTmp
.data
;
328 if (e
->tag
== Iex_Get
&& e
->Iex
.Get
.offset
== STACK_POINTER_OFFSET
)
334 //VG_(printf)(" (%s)\n", result ? "True" : "False");
342 static const IROp u_widen_irop
[5][9] = {
343 [Ity_I1
- Ity_I1
] = { [4] = Iop_1Uto32
, [8] = Iop_1Uto64
},
344 [Ity_I8
- Ity_I1
] = { [4] = Iop_8Uto32
, [8] = Iop_8Uto64
},
345 [Ity_I16
- Ity_I1
] = { [4] = Iop_16Uto32
, [8] = Iop_16Uto64
},
346 [Ity_I32
- Ity_I1
] = { [8] = Iop_32Uto64
},
350 * Instrument the client code to trace a memory load (--trace-addr).
352 static IRExpr
* instr_trace_mem_load(IRSB
* const bb
, IRExpr
* addr_expr
,
354 IRExpr
* const guard
/* NULL => True */)
358 tmp
= newIRTemp(bb
->tyenv
, typeOfIRExpr(bb
->tyenv
, addr_expr
));
359 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
, addr_expr
));
360 addr_expr
= IRExpr_RdTmp(tmp
);
362 = unsafeIRDirty_0_N(/*regparms*/2,
363 "drd_trace_mem_load",
364 VG_(fnptr_to_fnentry
)
365 (drd_trace_mem_load
),
366 mkIRExprVec_2(addr_expr
, mkIRExpr_HWord(size
)));
367 if (guard
) di
->guard
= guard
;
368 addStmtToIRSB(bb
, IRStmt_Dirty(di
));
374 * Instrument the client code to trace a memory store (--trace-addr).
376 static void instr_trace_mem_store(IRSB
* const bb
, IRExpr
* const addr_expr
,
377 IRExpr
* data_expr_hi
, IRExpr
* data_expr_lo
,
378 IRExpr
* const guard
/* NULL => True */)
383 tl_assert(sizeof(HWord
) == 4 || sizeof(HWord
) == 8);
384 tl_assert(!data_expr_hi
|| typeOfIRExpr(bb
->tyenv
, data_expr_hi
) == Ity_I32
);
386 ty_data_expr
= typeOfIRExpr(bb
->tyenv
, data_expr_lo
);
387 size
= sizeofIRType(ty_data_expr
);
391 if (ty_data_expr
== Ity_I32
) {
392 IRTemp tmp
= newIRTemp(bb
->tyenv
, Ity_F32
);
393 data_expr_lo
= IRExpr_Unop(Iop_ReinterpI32asF32
, data_expr_lo
);
394 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
, data_expr_lo
));
395 data_expr_lo
= IRExpr_RdTmp(tmp
);
396 ty_data_expr
= Ity_F32
;
397 } else if (ty_data_expr
== Ity_I64
) {
398 IRTemp tmp
= newIRTemp(bb
->tyenv
, Ity_F64
);
399 data_expr_lo
= IRExpr_Unop(Iop_ReinterpI64asF64
, data_expr_lo
);
400 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
, data_expr_lo
));
401 data_expr_lo
= IRExpr_RdTmp(tmp
);
402 ty_data_expr
= Ity_F64
;
406 if (ty_data_expr
== Ity_F32
) {
407 IRTemp tmp
= newIRTemp(bb
->tyenv
, Ity_I32
);
408 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
, IRExpr_Unop(Iop_ReinterpF32asI32
,
410 data_expr_lo
= IRExpr_RdTmp(tmp
);
411 ty_data_expr
= Ity_I32
;
412 } else if (ty_data_expr
== Ity_F64
) {
413 IRTemp tmp
= newIRTemp(bb
->tyenv
, Ity_I64
);
414 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
, IRExpr_Unop(Iop_ReinterpF64asI64
,
416 data_expr_lo
= IRExpr_RdTmp(tmp
);
417 ty_data_expr
= Ity_I64
;
420 if (size
== sizeof(HWord
)
421 && (ty_data_expr
== Ity_I32
|| ty_data_expr
== Ity_I64
))
423 /* No conversion necessary */
427 if (Ity_I1
<= ty_data_expr
429 < Ity_I1
+ sizeof(u_widen_irop
)/sizeof(u_widen_irop
[0]))
431 widen_op
= u_widen_irop
[ty_data_expr
- Ity_I1
][sizeof(HWord
)];
433 widen_op
= Iop_INVALID
;
435 widen_op
= Iop_INVALID
;
437 if (widen_op
!= Iop_INVALID
) {
440 /* Widen the integer expression to a HWord */
441 tmp
= newIRTemp(bb
->tyenv
, sizeof(HWord
) == 4 ? Ity_I32
: Ity_I64
);
443 IRStmt_WrTmp(tmp
, IRExpr_Unop(widen_op
, data_expr_lo
)));
444 data_expr_lo
= IRExpr_RdTmp(tmp
);
445 } else if (size
> sizeof(HWord
) && !data_expr_hi
446 && ty_data_expr
== Ity_I64
) {
449 tl_assert(sizeof(HWord
) == 4);
450 tl_assert(size
== 8);
451 tmp
= newIRTemp(bb
->tyenv
, Ity_I32
);
454 IRExpr_Unop(Iop_64HIto32
, data_expr_lo
)));
455 data_expr_hi
= IRExpr_RdTmp(tmp
);
456 tmp
= newIRTemp(bb
->tyenv
, Ity_I32
);
457 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
,
458 IRExpr_Unop(Iop_64to32
, data_expr_lo
)));
459 data_expr_lo
= IRExpr_RdTmp(tmp
);
461 data_expr_lo
= mkIRExpr_HWord(0);
465 = unsafeIRDirty_0_N(/*regparms*/3,
466 "drd_trace_mem_store",
467 VG_(fnptr_to_fnentry
)(drd_trace_mem_store
),
468 mkIRExprVec_4(addr_expr
, mkIRExpr_HWord(size
),
469 data_expr_hi
? data_expr_hi
470 : mkIRExpr_HWord(0), data_expr_lo
));
471 if (guard
) di
->guard
= guard
;
472 addStmtToIRSB(bb
, IRStmt_Dirty(di
) );
475 static void instrument_load(IRSB
* const bb
, IRExpr
* const addr_expr
,
477 IRExpr
* const guard
/* NULL => True */)
483 if (!s_check_stack_accesses
&& is_stack_access(bb
, addr_expr
))
489 argv
= mkIRExprVec_1(addr_expr
);
490 di
= unsafeIRDirty_0_N(/*regparms*/1,
492 VG_(fnptr_to_fnentry
)(drd_trace_load_1
),
496 argv
= mkIRExprVec_1(addr_expr
);
497 di
= unsafeIRDirty_0_N(/*regparms*/1,
499 VG_(fnptr_to_fnentry
)(drd_trace_load_2
),
503 argv
= mkIRExprVec_1(addr_expr
);
504 di
= unsafeIRDirty_0_N(/*regparms*/1,
506 VG_(fnptr_to_fnentry
)(drd_trace_load_4
),
510 argv
= mkIRExprVec_1(addr_expr
);
511 di
= unsafeIRDirty_0_N(/*regparms*/1,
513 VG_(fnptr_to_fnentry
)(drd_trace_load_8
),
517 size_expr
= mkIRExpr_HWord(size
);
518 argv
= mkIRExprVec_2(addr_expr
, size_expr
);
519 di
= unsafeIRDirty_0_N(/*regparms*/2,
521 VG_(fnptr_to_fnentry
)(DRD_(trace_load
)),
525 if (guard
) di
->guard
= guard
;
526 addStmtToIRSB(bb
, IRStmt_Dirty(di
));
529 static void instrument_store(IRSB
* const bb
, IRExpr
* addr_expr
,
530 IRExpr
* const data_expr
,
531 IRExpr
* const guard_expr
/* NULL => True */)
538 size
= sizeofIRType(typeOfIRExpr(bb
->tyenv
, data_expr
));
540 if (UNLIKELY(DRD_(any_address_is_traced
)())) {
541 IRTemp tmp
= newIRTemp(bb
->tyenv
, typeOfIRExpr(bb
->tyenv
, addr_expr
));
542 addStmtToIRSB(bb
, IRStmt_WrTmp(tmp
, addr_expr
));
543 addr_expr
= IRExpr_RdTmp(tmp
);
544 instr_trace_mem_store(bb
, addr_expr
, NULL
, data_expr
, guard_expr
);
547 if (!s_check_stack_accesses
&& is_stack_access(bb
, addr_expr
))
553 argv
= mkIRExprVec_1(addr_expr
);
554 di
= unsafeIRDirty_0_N(/*regparms*/1,
556 VG_(fnptr_to_fnentry
)(drd_trace_store_1
),
560 argv
= mkIRExprVec_1(addr_expr
);
561 di
= unsafeIRDirty_0_N(/*regparms*/1,
563 VG_(fnptr_to_fnentry
)(drd_trace_store_2
),
567 argv
= mkIRExprVec_1(addr_expr
);
568 di
= unsafeIRDirty_0_N(/*regparms*/1,
570 VG_(fnptr_to_fnentry
)(drd_trace_store_4
),
574 argv
= mkIRExprVec_1(addr_expr
);
575 di
= unsafeIRDirty_0_N(/*regparms*/1,
577 VG_(fnptr_to_fnentry
)(drd_trace_store_8
),
581 size_expr
= mkIRExpr_HWord(size
);
582 argv
= mkIRExprVec_2(addr_expr
, size_expr
);
583 di
= unsafeIRDirty_0_N(/*regparms*/2,
585 VG_(fnptr_to_fnentry
)(DRD_(trace_store
)),
589 if (guard_expr
) di
->guard
= guard_expr
;
590 addStmtToIRSB(bb
, IRStmt_Dirty(di
));
593 IRSB
* DRD_(instrument
)(VgCallbackClosure
* const closure
,
595 const VexGuestLayout
* const layout
,
596 const VexGuestExtents
* const vge
,
597 const VexArchInfo
* archinfo_host
,
598 IRType
const gWordTy
,
599 IRType
const hWordTy
)
605 Bool instrument
= True
;
609 bb
->tyenv
= deepCopyIRTypeEnv(bb_in
->tyenv
);
610 bb
->next
= deepCopyIRExpr(bb_in
->next
);
611 bb
->jumpkind
= bb_in
->jumpkind
;
612 bb
->offsIP
= bb_in
->offsIP
;
614 for (i
= 0; i
< bb_in
->stmts_used
; i
++)
616 IRStmt
* const st
= bb_in
->stmts
[i
];
618 tl_assert(isFlatIRStmt(st
));
622 /* Note: the code for not instrumenting the code in .plt */
623 /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
624 /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
625 /* This is because on this platform dynamic library symbols are */
626 /* relocated in another way than by later binutils versions. The */
627 /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
629 instrument
= VG_(DebugInfo_sect_kind
)(NULL
, st
->Ist
.IMark
.addr
)
631 addStmtToIRSB(bb
, st
);
635 switch (st
->Ist
.MBE
.event
)
638 break; /* not interesting to DRD */
639 case Imbe_CancelReservation
:
640 break; /* not interesting to DRD */
644 addStmtToIRSB(bb
, st
);
649 instrument_store(bb
, st
->Ist
.Store
.addr
, st
->Ist
.Store
.data
,
651 addStmtToIRSB(bb
, st
);
655 IRStoreG
* sg
= st
->Ist
.StoreG
.details
;
656 IRExpr
* data
= sg
->data
;
657 IRExpr
* addr
= sg
->addr
;
659 instrument_store(bb
, addr
, data
, sg
->guard
);
660 addStmtToIRSB(bb
, st
);
665 IRLoadG
* lg
= st
->Ist
.LoadG
.details
;
666 IRType type
= Ity_INVALID
; /* loaded type */
667 IRType typeWide
= Ity_INVALID
; /* after implicit widening */
668 IRExpr
* addr_expr
= lg
->addr
;
669 typeOfIRLoadGOp(lg
->cvt
, &typeWide
, &type
);
670 tl_assert(type
!= Ity_INVALID
);
671 if (UNLIKELY(DRD_(any_address_is_traced
)())) {
672 addr_expr
= instr_trace_mem_load(bb
, addr_expr
,
673 sizeofIRType(type
), lg
->guard
);
675 instrument_load(bb
, lg
->addr
,
676 sizeofIRType(type
), lg
->guard
);
677 addStmtToIRSB(bb
, st
);
683 const IRExpr
* const data
= st
->Ist
.WrTmp
.data
;
684 IRExpr
* addr_expr
= data
->Iex
.Load
.addr
;
685 if (data
->tag
== Iex_Load
) {
686 if (UNLIKELY(DRD_(any_address_is_traced
)())) {
687 addr_expr
= instr_trace_mem_load(bb
, addr_expr
,
688 sizeofIRType(data
->Iex
.Load
.ty
),
691 instrument_load(bb
, addr_expr
, sizeofIRType(data
->Iex
.Load
.ty
),
695 addStmtToIRSB(bb
, st
);
700 IRDirty
* d
= st
->Ist
.Dirty
.details
;
701 IREffect
const mFx
= d
->mFx
;
709 tl_assert(d
->mSize
> 0);
710 argv
= mkIRExprVec_2(d
->mAddr
, mkIRExpr_HWord(d
->mSize
));
711 if (mFx
== Ifx_Read
|| mFx
== Ifx_Modify
) {
712 di
= unsafeIRDirty_0_N(
715 VG_(fnptr_to_fnentry
)(DRD_(trace_load
)),
717 addStmtToIRSB(bb
, IRStmt_Dirty(di
));
719 if (mFx
== Ifx_Write
|| mFx
== Ifx_Modify
)
721 di
= unsafeIRDirty_0_N(
724 VG_(fnptr_to_fnentry
)(DRD_(trace_store
)),
726 addStmtToIRSB(bb
, IRStmt_Dirty(di
));
733 addStmtToIRSB(bb
, st
);
739 * Treat compare-and-swap as a read. By handling atomic
740 * instructions as read instructions no data races are reported
741 * between conflicting atomic operations nor between atomic
742 * operations and non-atomic reads. Conflicts between atomic
743 * operations and non-atomic write operations are still reported
747 IRCAS
* cas
= st
->Ist
.CAS
.details
;
749 tl_assert(cas
->addr
!= NULL
);
750 tl_assert(cas
->dataLo
!= NULL
);
751 dataSize
= sizeofIRType(typeOfIRExpr(bb
->tyenv
, cas
->dataLo
));
752 if (cas
->dataHi
!= NULL
)
753 dataSize
*= 2; /* since it's a doubleword-CAS */
755 if (UNLIKELY(DRD_(any_address_is_traced
)()))
756 instr_trace_mem_store(bb
, cas
->addr
, cas
->dataHi
, cas
->dataLo
,
759 instrument_load(bb
, cas
->addr
, dataSize
, NULL
/*no guard*/);
761 addStmtToIRSB(bb
, st
);
766 * Ignore store-conditionals (except for tracing), and handle
767 * load-linked's exactly like normal loads.
771 if (st
->Ist
.LLSC
.storedata
== NULL
) {
773 dataTy
= typeOfIRTemp(bb_in
->tyenv
, st
->Ist
.LLSC
.result
);
775 IRExpr
* addr_expr
= st
->Ist
.LLSC
.addr
;
776 if (UNLIKELY(DRD_(any_address_is_traced
)()))
777 addr_expr
= instr_trace_mem_load(bb
, addr_expr
,
778 sizeofIRType(dataTy
),
779 NULL
/* no guard */);
781 instrument_load(bb
, addr_expr
, sizeofIRType(dataTy
),
786 instr_trace_mem_store(bb
, st
->Ist
.LLSC
.addr
, NULL
,
787 st
->Ist
.LLSC
.storedata
,
790 addStmtToIRSB(bb
, st
);
799 /* None of these can contain any memory references. */
800 addStmtToIRSB(bb
, st
);