/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2020 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, see <http://www.gnu.org/licenses/>.

  The GNU General Public License is contained in the file COPYING.
*/
#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining. */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"

#include "libvex_guest_offsets.h"
/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_arm64)
#define STACK_POINTER_OFFSET OFFSET_arm64_XSP
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32) || defined(VGA_nanomips)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
#else
#error Unknown architecture.
#endif
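
/* STACK_POINTER_OFFSET is consumed by is_stack_access() below to recognize
   Iex_Get expressions that read the guest stack pointer register. */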
/* Local variables. */

static Bool s_check_stack_accesses = False;
static Bool s_first_race_only      = False;
/* Function definitions. */

Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}
void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      char* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %lu val %lu/0x%lx (thread %u /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %lu val %llu/0x%llx (thread %u"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %lu (thread %u / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end  " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}
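
/* Note on the hi/lo value arguments above: on a 32-bit host a 64-bit store
   does not fit in a single HWord, so the instrumented code passes the stored
   value in two halves and the tracing code recombines them. For example,
   storing 0x1122334455667788ULL yields stored_value_hi == 0x11223344 and
   stored_value_lo == 0x55667788 (illustrative values only). */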
static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
}

static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size,
                                              const HWord stored_value_hi,
                                              const HWord stored_value_lo)
{
   return DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
                                 stored_value_lo);
}
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      GenericErrInfo GEI = {
         .tid  = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid  = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}
VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined, for performance reasons. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid)()));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}
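
/* The drd_trace_load_<n> and drd_trace_store_<n> handlers below are
   size-specialized variants of DRD_(trace_load)() and DRD_(trace_store)():
   for the common 1-, 2-, 4- and 8-byte accesses they take a single argument
   and call a size-specific bitmap lookup, which keeps the instrumented
   fast path short. */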
static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}
VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined, for performance reasons. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid)()));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}
static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}
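
/* Example of the address pattern recognized by is_stack_access() below,
   written as hypothetical flat VEX IR for an amd64 guest (for exposition
   only; real IR prints the raw register offset instead of a symbolic name):

      t3 = GET:I64(STACK_POINTER_OFFSET)   -- Iex_Get of the guest SP
      STle(t3) = t5                        -- access through Iex_RdTmp t3

   The helper walks the statements of the superblock to find the Ist_WrTmp
   that defines the address temporary and checks whether its data is an
   Iex_Get of STACK_POINTER_OFFSET. */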
/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * (SP + offset).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //VG_(printf)("  (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}
static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1] = { [4] = Iop_1Uto32,  [8] = Iop_1Uto64 },
   [Ity_I8  - Ity_I1] = { [4] = Iop_8Uto32,  [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};
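
/* u_widen_irop maps an integer IRType and the host word size to the IROp
   that zero-extends a value of that type to a host word. It is indexed as
   u_widen_irop[ty - Ity_I1][sizeof(HWord)]; e.g. on a 64-bit host,
   u_widen_irop[Ity_I16 - Ity_I1][8] == Iop_16Uto64. Entries left zero
   denote combinations for which no single widening operation exists. */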
/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size,
                                    IRExpr* const guard/* NULL => True */)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);

   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/2,
                          "drd_trace_mem_load",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_load),
                          mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));

   return addr_expr;
}
/**
 * Instrument the client code to trace a memory store (--trace-addr).
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo,
                                  IRExpr* const guard/* NULL => True */)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/3,
                          "drd_trace_mem_store",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                          mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                        data_expr_hi ? data_expr_hi
                                        : mkIRExpr_HWord(0), data_expr_lo));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
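
/* Summary of the conversions above: dirty helper arguments must be integer
   host words, so F32/F64 store data is first reinterpreted as I32/I64,
   narrow integers are zero-extended to a HWord via u_widen_irop, and on
   32-bit hosts a 64-bit value is split into hi/lo halves with
   Iop_64HIto32 / Iop_64to32. Values that cannot be converted are passed
   as 0. */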
static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size,
                            IRExpr* const guard/* NULL => True */)
{
   IRDirty* di;
   IRExpr** argv;
   IRExpr*  size_expr;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size) {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
                             IRExpr* const data_expr,
                             IRExpr* const guard_expr/* NULL => True */)
{
   IRDirty* di;
   IRExpr** argv;
   IRExpr*  size_expr;
   HWord    size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)())) {
      IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
      addr_expr = IRExpr_RdTmp(tmp);
      instr_trace_mem_store(bb, addr_expr, NULL, data_expr, guard_expr);
   }

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size) {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   if (guard_expr) di->guard = guard_expr;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       const VexGuestLayout* const layout,
                       const VexGuestExtents* const vge,
                       const VexArchInfo* archinfo_host,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int      i;
   IRSB*    bb;
   IRExpr** argv;
   Bool     instrument = True;

   /* Set up the output superblock. */
   bb           = emptyIRSB();
   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next     = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP   = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
         /* Note: the code for not instrumenting the code in .plt */
         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
         /* This is because on this platform dynamic library symbols are */
         /* relocated in another way than by later binutils versions. The */
         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting to DRD */
         case Imbe_CancelReservation:
            break; /* not interesting to DRD */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data,
                             NULL/* no guard */);
         addStmtToIRSB(bb, st);
         break;

      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         IRExpr* data = sg->data;
         IRExpr* addr = sg->addr;
         if (instrument)
            instrument_store(bb, addr, data, sg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         IRType type = Ity_INVALID; /* loaded type */
         IRType typeWide = Ity_INVALID; /* after implicit widening */
         IRExpr* addr_expr = lg->addr;
         typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
         tl_assert(type != Ity_INVALID);
         if (UNLIKELY(DRD_(any_address_is_traced)())) {
            addr_expr = instr_trace_mem_load(bb, addr_expr,
                                             sizeofIRType(type), lg->guard);
         }
         if (instrument)
            instrument_load(bb, lg->addr,
                            sizeofIRType(type), lg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            IRExpr* addr_expr = data->Iex.Load.addr;
            if (data->tag == Iex_Load) {
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(data->Iex.Load.ty),
                                                   NULL/* no guard */);
               }
               instrument_load(bb, addr_expr, sizeofIRType(data->Iex.Load.ty),
                               NULL/* no guard */);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;

            switch (mFx) {
            case Ifx_None:
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               break;
            default:
               tl_assert(0);
            }
            if (mFx == Ifx_Read || mFx == Ifx_Write || mFx == Ifx_Modify) {
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported,
             * however.
             */
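            /* For example: two threads that both update a shared counter
               with an atomic compare-and-swap are not reported against each
               other, but a plain (non-atomic) store to that counter racing
               with the CAS still is. */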
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo,
                                     NULL/* no guard */);

            instrument_load(bb, cas->addr, dataSize, NULL/*no guard*/);
         }
         addStmtToIRSB(bb, st);
         break;
      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
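         /* LL/SC pairs (e.g. the ARM ldrex/strex family) implement atomic
            read-modify-write; consistent with the CAS handling above, only
            the load-linked side is instrumented as a (racing) read. */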
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy),
                                                   NULL /* no guard */);

               instrument_load(bb, addr_expr, sizeofIRType(dataTy),
                               NULL/* no guard */);
            }
         } else {
            /* SC */
            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                     st->Ist.LLSC.storedata,
                                     NULL/* no guard */);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);