; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -amdgpu-lower-intrinsics -amdgpu-mem-intrinsic-expand-size=8 %s | FileCheck -check-prefix=OPT8 %s
; RUN: opt -S -amdgpu-lower-intrinsics -amdgpu-mem-intrinsic-expand-size=4 %s | FileCheck -check-prefix=OPT4 %s
; RUN: opt -S -amdgpu-lower-intrinsics -amdgpu-mem-intrinsic-expand-size=0 %s | FileCheck -check-prefix=OPT0 %s
; RUN: opt -S -amdgpu-lower-intrinsics -amdgpu-mem-intrinsic-expand-size=-1 %s | FileCheck -check-prefix=OPT_NEG %s

; Test that the -amdgpu-mem-intrinsic-expand-size flag works.

; Make sure we can always eliminate the intrinsic, even at 0.
; A zero-length memset is kept as-is at thresholds 8/4/0; at -1 it is expanded
; into a loop that is statically skipped (br i1 true straight to split).
define amdgpu_kernel void @memset_size_0(i8 addrspace(1)* %dst, i8 %val) {
; OPT8-LABEL: @memset_size_0(
; OPT8-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* [[DST:%.*]], i8 [[VAL:%.*]], i64 0, i1 false)
; OPT8-NEXT: ret void
;
; OPT4-LABEL: @memset_size_0(
; OPT4-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* [[DST:%.*]], i8 [[VAL:%.*]], i64 0, i1 false)
; OPT4-NEXT: ret void
;
; OPT0-LABEL: @memset_size_0(
; OPT0-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* [[DST:%.*]], i8 [[VAL:%.*]], i64 0, i1 false)
; OPT0-NEXT: ret void
;
; OPT_NEG-LABEL: @memset_size_0(
; OPT_NEG-NEXT: br i1 true, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; OPT_NEG: loadstoreloop:
; OPT_NEG-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; OPT_NEG-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[DST:%.*]], i64 [[TMP1]]
; OPT_NEG-NEXT: store i8 [[VAL:%.*]], i8 addrspace(1)* [[TMP2]]
; OPT_NEG-NEXT: [[TMP3]] = add i64 [[TMP1]], 1
; OPT_NEG-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 0
; OPT_NEG-NEXT: br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; OPT_NEG: split:
; OPT_NEG-NEXT: ret void
;
  call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 0, i1 false)
  ret void
}
; A 4-byte memset is kept intact at thresholds 8 and 4, and expanded into a
; load/store loop at threshold 0 and at -1.
define amdgpu_kernel void @memset_size_4(i8 addrspace(1)* %dst, i8 %val) {
; OPT8-LABEL: @memset_size_4(
; OPT8-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* [[DST:%.*]], i8 [[VAL:%.*]], i64 4, i1 false)
; OPT8-NEXT: ret void
;
; OPT4-LABEL: @memset_size_4(
; OPT4-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* [[DST:%.*]], i8 [[VAL:%.*]], i64 4, i1 false)
; OPT4-NEXT: ret void
;
; OPT0-LABEL: @memset_size_4(
; OPT0-NEXT: br i1 false, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; OPT0: loadstoreloop:
; OPT0-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; OPT0-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[DST:%.*]], i64 [[TMP1]]
; OPT0-NEXT: store i8 [[VAL:%.*]], i8 addrspace(1)* [[TMP2]]
; OPT0-NEXT: [[TMP3]] = add i64 [[TMP1]], 1
; OPT0-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 4
; OPT0-NEXT: br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; OPT0: split:
; OPT0-NEXT: ret void
;
; OPT_NEG-LABEL: @memset_size_4(
; OPT_NEG-NEXT: br i1 false, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; OPT_NEG: loadstoreloop:
; OPT_NEG-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; OPT_NEG-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[DST:%.*]], i64 [[TMP1]]
; OPT_NEG-NEXT: store i8 [[VAL:%.*]], i8 addrspace(1)* [[TMP2]]
; OPT_NEG-NEXT: [[TMP3]] = add i64 [[TMP1]], 1
; OPT_NEG-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 4
; OPT_NEG-NEXT: br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; OPT_NEG: split:
; OPT_NEG-NEXT: ret void
;
  call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 4, i1 false)
  ret void
}
; An 8-byte memset is kept intact only at threshold 8; thresholds 4, 0, and -1
; all expand it into a load/store loop.
define amdgpu_kernel void @memset_size_8(i8 addrspace(1)* %dst, i8 %val) {
; OPT8-LABEL: @memset_size_8(
; OPT8-NEXT: call void @llvm.memset.p1i8.i64(i8 addrspace(1)* [[DST:%.*]], i8 [[VAL:%.*]], i64 8, i1 false)
; OPT8-NEXT: ret void
;
; OPT4-LABEL: @memset_size_8(
; OPT4-NEXT: br i1 false, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; OPT4: loadstoreloop:
; OPT4-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; OPT4-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[DST:%.*]], i64 [[TMP1]]
; OPT4-NEXT: store i8 [[VAL:%.*]], i8 addrspace(1)* [[TMP2]]
; OPT4-NEXT: [[TMP3]] = add i64 [[TMP1]], 1
; OPT4-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 8
; OPT4-NEXT: br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; OPT4: split:
; OPT4-NEXT: ret void
;
; OPT0-LABEL: @memset_size_8(
; OPT0-NEXT: br i1 false, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; OPT0: loadstoreloop:
; OPT0-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; OPT0-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[DST:%.*]], i64 [[TMP1]]
; OPT0-NEXT: store i8 [[VAL:%.*]], i8 addrspace(1)* [[TMP2]]
; OPT0-NEXT: [[TMP3]] = add i64 [[TMP1]], 1
; OPT0-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 8
; OPT0-NEXT: br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; OPT0: split:
; OPT0-NEXT: ret void
;
; OPT_NEG-LABEL: @memset_size_8(
; OPT_NEG-NEXT: br i1 false, label [[SPLIT:%.*]], label [[LOADSTORELOOP:%.*]]
; OPT_NEG: loadstoreloop:
; OPT_NEG-NEXT: [[TMP1:%.*]] = phi i64 [ 0, [[TMP0:%.*]] ], [ [[TMP3:%.*]], [[LOADSTORELOOP]] ]
; OPT_NEG-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[DST:%.*]], i64 [[TMP1]]
; OPT_NEG-NEXT: store i8 [[VAL:%.*]], i8 addrspace(1)* [[TMP2]]
; OPT_NEG-NEXT: [[TMP3]] = add i64 [[TMP1]], 1
; OPT_NEG-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 8
; OPT_NEG-NEXT: br i1 [[TMP4]], label [[LOADSTORELOOP]], label [[SPLIT]]
; OPT_NEG: split:
; OPT_NEG-NEXT: ret void
;
  call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 8, i1 false)
  ret void
}
declare void @llvm.memset.p1i8.i64(i8 addrspace(1)* nocapture writeonly, i8, i64, i1 immarg) #0

attributes #0 = { argmemonly nounwind willreturn writeonly }