1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
2 ; RUN: opt < %s -passes=asan -S | FileCheck %s
3 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8"
4 target triple = "amdgcn-amd-amdhsa"
; Store through a generic (flat) pointer obtained by addrspacecast from a
; global pointer. The CHECK lines (autogenerated by update_test_checks.py —
; regenerate rather than hand-edit) verify that on amdgcn ASan first guards
; the instrumentation with llvm.amdgcn.is.shared / llvm.amdgcn.is.private,
; instrumenting only the non-shared, non-private (i.e. global) path: the
; address is shifted right by 3, offset by the shadow base 2147450880, the
; shadow byte is loaded, and a failing slow-path check calls
; __asan_report_store4 for this 4-byte store.
6 define protected amdgpu_kernel void @generic_store(ptr addrspace(1) %p, i32 %i) sanitize_address {
7 ; CHECK-LABEL: define protected amdgpu_kernel void @generic_store(
8 ; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
10 ; CHECK-NEXT: [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
11 ; CHECK-NEXT: [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
12 ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
13 ; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
14 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
15 ; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP18:%.*]]
17 ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
18 ; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
19 ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
20 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
21 ; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
22 ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
23 ; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP17:%.*]], !prof [[PROF0:![0-9]+]]
25 ; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP5]], 7
26 ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 3
27 ; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i8
28 ; CHECK-NEXT: [[TMP15:%.*]] = icmp sge i8 [[TMP14]], [[TMP9]]
29 ; CHECK-NEXT: br i1 [[TMP15]], label [[TMP16:%.*]], label [[TMP17]]
31 ; CHECK-NEXT: call void @__asan_report_store4(i64 [[TMP5]]) #[[ATTR3:[0-9]+]]
32 ; CHECK-NEXT: unreachable
34 ; CHECK-NEXT: br label [[TMP18]]
36 ; CHECK-NEXT: store i32 0, ptr [[Q]], align 4
37 ; CHECK-NEXT: ret void
41 %q = addrspacecast ptr addrspace(1) %p to ptr
42 store i32 0, ptr %q, align 4
; Load counterpart of @generic_store above: the same addrspace guard and
; shadow-memory sequence is expected, but the slow path calls
; __asan_report_load4 for the 4-byte load. CHECK lines are autogenerated
; by update_test_checks.py — regenerate rather than hand-edit.
; NOTE(review): this chunk ends before the function's ret/closing brace;
; confirm the remainder of the test in the full file.
46 define protected amdgpu_kernel void @generic_load(ptr addrspace(1) %p, i32 %i) sanitize_address {
47 ; CHECK-LABEL: define protected amdgpu_kernel void @generic_load(
48 ; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[I:%.*]]) #[[ATTR0]] {
50 ; CHECK-NEXT: [[Q:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
51 ; CHECK-NEXT: [[TMP0:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[Q]])
52 ; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[Q]])
53 ; CHECK-NEXT: [[TMP2:%.*]] = or i1 [[TMP0]], [[TMP1]]
54 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
55 ; CHECK-NEXT: br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP18:%.*]]
57 ; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[Q]] to i64
58 ; CHECK-NEXT: [[TMP6:%.*]] = lshr i64 [[TMP5]], 3
59 ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], 2147450880
60 ; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
61 ; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP8]], align 1
62 ; CHECK-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP9]], 0
63 ; CHECK-NEXT: br i1 [[TMP10]], label [[TMP11:%.*]], label [[TMP17:%.*]], !prof [[PROF0]]
65 ; CHECK-NEXT: [[TMP12:%.*]] = and i64 [[TMP5]], 7
66 ; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 3
67 ; CHECK-NEXT: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i8
68 ; CHECK-NEXT: [[TMP15:%.*]] = icmp sge i8 [[TMP14]], [[TMP9]]
69 ; CHECK-NEXT: br i1 [[TMP15]], label [[TMP16:%.*]], label [[TMP17]]
71 ; CHECK-NEXT: call void @__asan_report_load4(i64 [[TMP5]]) #[[ATTR3]]
72 ; CHECK-NEXT: unreachable
74 ; CHECK-NEXT: br label [[TMP18]]
76 ; CHECK-NEXT: [[R:%.*]] = load i32, ptr [[Q]], align 4
77 ; CHECK-NEXT: ret void
81 %q = addrspacecast ptr addrspace(1) %p to ptr
82 %r = load i32, ptr %q, align 4