1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -sroa -S | FileCheck %s
3 target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-f80:128-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
5 declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
6 declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
7 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
9 ; This tests that allocas are not split into slices that are not byte-width multiples
10 define void @no_split_on_non_byte_width(i32) {
11 ; CHECK-LABEL: @no_split_on_non_byte_width(
12 ; CHECK-NEXT: [[ARG_SROA_0:%.*]] = alloca i8, align 8
13 ; CHECK-NEXT: [[ARG_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[TMP0:%.*]] to i8
14 ; CHECK-NEXT: store i8 [[ARG_SROA_0_0_EXTRACT_TRUNC]], i8* [[ARG_SROA_0]], align 8
15 ; CHECK-NEXT: [[ARG_SROA_3_0_EXTRACT_SHIFT:%.*]] = lshr i32 [[TMP0]], 8
16 ; CHECK-NEXT: [[ARG_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i32 [[ARG_SROA_3_0_EXTRACT_SHIFT]] to i24
17 ; CHECK-NEXT: br label [[LOAD_I32:%.*]]
19 ; CHECK-NEXT: [[ARG_SROA_0_0_ARG_SROA_0_0_R01:%.*]] = load i8, i8* [[ARG_SROA_0]], align 8
20 ; CHECK-NEXT: br label [[LOAD_I1:%.*]]
22 ; CHECK-NEXT: [[ARG_SROA_0_0_P1_SROA_CAST4:%.*]] = bitcast i8* [[ARG_SROA_0]] to i1*
23 ; CHECK-NEXT: [[ARG_SROA_0_0_ARG_SROA_0_0_T1:%.*]] = load i1, i1* [[ARG_SROA_0_0_P1_SROA_CAST4]], align 8
24 ; CHECK-NEXT: ret void
; Store the incoming i32 to the alloca, then read it back both as a whole i32
; and as an i1 through a bitcast pointer. Per the CHECK lines, the narrowest
; slice SROA produces is an i8 alloca (the i1 load is done through a bitcast of
; that i8 slot) — the i1 access must not cause a sub-byte-width slice.
26 %arg = alloca i32 , align 8
27 store i32 %0, i32* %arg
31 %r0 = load i32, i32* %arg
35 %p1 = bitcast i32* %arg to i1*
36 %t1 = load i1, i1* %p1
40 ; PR18726: Check that we use memcpy and memset to fill out padding when we have
41 ; a slice with a simple single type whose store size is smaller than the slice
44 %union.Foo = type { x86_fp80, i64, i64 }
46 @foo_copy_source = external constant %union.Foo
47 @i64_sink = global i64 0
49 define void @memcpy_fp80_padding() {
50 ; CHECK-LABEL: @memcpy_fp80_padding(
51 ; CHECK-NEXT: [[X_SROA_0:%.*]] = alloca x86_fp80, align 16
52 ; CHECK-NEXT: [[X_SROA_0_0_X_I8_SROA_CAST:%.*]] = bitcast x86_fp80* [[X_SROA_0]] to i8*
53 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[X_SROA_0_0_X_I8_SROA_CAST]], i8* align 16 bitcast (%union.Foo* @foo_copy_source to i8*), i32 16, i1 false)
54 ; CHECK-NEXT: [[X_SROA_1_0_COPYLOAD:%.*]] = load i64, i64* getelementptr inbounds ([[UNION_FOO:%.*]], %union.Foo* @foo_copy_source, i64 0, i32 1), align 16
55 ; CHECK-NEXT: [[X_SROA_2_0_COPYLOAD:%.*]] = load i64, i64* getelementptr inbounds ([[UNION_FOO]], %union.Foo* @foo_copy_source, i64 0, i32 2), align 8
56 ; CHECK-NEXT: store i64 [[X_SROA_1_0_COPYLOAD]], i64* @i64_sink, align 4
57 ; CHECK-NEXT: ret void
59 %x = alloca %union.Foo
; Copy the whole 32-byte union from the constant source. Per the CHECK lines,
; SROA is expected to keep a 16-byte memcpy for the x86_fp80 slice (whose
; store size is smaller than the 16-byte slice, so the memcpy fills the
; padding too) and turn the i64 fields into scalar loads.
62 %x_i8 = bitcast %union.Foo* %x to i8*
63 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %x_i8, i8* align 16 bitcast (%union.Foo* @foo_copy_source to i8*), i32 32, i1 false)
65 ; Access a slice of the alloca to trigger SROA.
66 %mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
67 %elt = load i64, i64* %mid_p
68 store i64 %elt, i64* @i64_sink
72 define void @memset_fp80_padding() {
73 ; CHECK-LABEL: @memset_fp80_padding(
74 ; CHECK-NEXT: [[X_SROA_0:%.*]] = alloca x86_fp80, align 16
75 ; CHECK-NEXT: [[X_SROA_0_0_X_I8_SROA_CAST1:%.*]] = bitcast x86_fp80* [[X_SROA_0]] to i8*
76 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 16 [[X_SROA_0_0_X_I8_SROA_CAST1]], i8 -1, i32 16, i1 false)
77 ; CHECK-NEXT: store i64 -1, i64* @i64_sink, align 4
78 ; CHECK-NEXT: ret void
80 %x = alloca %union.Foo
; memset variant of the case above: per the CHECK lines, SROA keeps a 16-byte
; memset for the x86_fp80 slice (so the slice's padding bytes are still
; filled) and folds the i64 field directly to the constant -1 store.
83 %x_i8 = bitcast %union.Foo* %x to i8*
84 call void @llvm.memset.p0i8.i32(i8* align 16 %x_i8, i8 -1, i32 32, i1 false)
86 ; Access a slice of the alloca to trigger SROA.
87 %mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
88 %elt = load i64, i64* %mid_p
89 store i64 %elt, i64* @i64_sink
93 %S.vec3float = type { float, float, float }
94 %U.vec3float = type { <4 x float> }
96 declare i32 @memcpy_vec3float_helper(%S.vec3float*)
98 ; PR18726: Check that SROA does not rewrite a 12-byte memcpy into a 16-byte
99 ; vector store, hence accidentally putting gibberish onto the stack.
100 define i32 @memcpy_vec3float_widening(%S.vec3float* %x) {
101 ; CHECK-LABEL: @memcpy_vec3float_widening(
103 ; CHECK-NEXT: [[TMP1_SROA_0_0_TMP1_SROA_0_0__SROA_CAST_SROA_CAST:%.*]] = bitcast %S.vec3float* [[X:%.*]] to <3 x float>*
104 ; CHECK-NEXT: [[TMP1_SROA_0_0_COPYLOAD:%.*]] = load <3 x float>, <3 x float>* [[TMP1_SROA_0_0_TMP1_SROA_0_0__SROA_CAST_SROA_CAST]], align 4
105 ; CHECK-NEXT: [[TMP1_SROA_0_0_VEC_EXPAND:%.*]] = shufflevector <3 x float> [[TMP1_SROA_0_0_COPYLOAD]], <3 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
106 ; CHECK-NEXT: [[TMP1_SROA_0_0_VECBLEND:%.*]] = select <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> [[TMP1_SROA_0_0_VEC_EXPAND]], <4 x float> undef
107 ; CHECK-NEXT: [[TMP2:%.*]] = alloca [[S_VEC3FLOAT:%.*]], align 4
108 ; CHECK-NEXT: [[TMP1_SROA_0_0_TMP1_SROA_0_0__SROA_CAST2_SROA_CAST:%.*]] = bitcast %S.vec3float* [[TMP2]] to <3 x float>*
109 ; CHECK-NEXT: [[TMP1_SROA_0_0_VEC_EXTRACT:%.*]] = shufflevector <4 x float> [[TMP1_SROA_0_0_VECBLEND]], <4 x float> poison, <3 x i32> <i32 0, i32 1, i32 2>
110 ; CHECK-NEXT: store <3 x float> [[TMP1_SROA_0_0_VEC_EXTRACT]], <3 x float>* [[TMP1_SROA_0_0_TMP1_SROA_0_0__SROA_CAST2_SROA_CAST]], align 4
111 ; CHECK-NEXT: [[RESULT:%.*]] = call i32 @memcpy_vec3float_helper(%S.vec3float* [[TMP2]])
112 ; CHECK-NEXT: ret i32 [[RESULT]]
115 ; Create a temporary variable %tmp1 and copy %x[0] into it
116 %tmp1 = alloca %S.vec3float, align 4
117 %0 = bitcast %S.vec3float* %tmp1 to i8*
118 %1 = bitcast %S.vec3float* %x to i8*
119 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %0, i8* align 4 %1, i32 12, i1 false)
121 ; The following block does nothing; but appears to confuse SROA
; The unused 16-byte <4 x float> load covers the whole %U.vec3float union; it
; must not cause the 12-byte memcpys to be rewritten as 16-byte vector
; load/store pairs — the CHECK lines require <3 x float> (12-byte) accesses.
122 %unused1 = bitcast %S.vec3float* %tmp1 to %U.vec3float*
123 %unused2 = getelementptr inbounds %U.vec3float, %U.vec3float* %unused1, i32 0, i32 0
124 %unused3 = load <4 x float>, <4 x float>* %unused2, align 1
126 ; Create a second temporary and copy %tmp1 into it
127 %tmp2 = alloca %S.vec3float, align 4
128 %2 = bitcast %S.vec3float* %tmp2 to i8*
129 %3 = bitcast %S.vec3float* %tmp1 to i8*
130 call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %2, i8* align 4 %3, i32 12, i1 false)
132 %result = call i32 @memcpy_vec3float_helper(%S.vec3float* %tmp2)
136 ; Don't crash on a length that is a constant expression.
138 define void @PR50888() {
139 ; CHECK-LABEL: @PR50888(
140 ; CHECK-NEXT: [[ARRAY:%.*]] = alloca i8, align 1
141 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[ARRAY]], i8 0, i64 ptrtoint (void ()* @PR50888 to i64), i1 false)
142 ; CHECK-NEXT: ret void
; The memset length is a constant *expression* (ptrtoint of this function's
; own address), not a plain integer constant; SROA must handle it without
; crashing, and the CHECK lines show the memset is kept rather than folded.
145 call void @llvm.memset.p0i8.i64(i8* align 16 %array, i8 0, i64 ptrtoint (void ()* @PR50888 to i64), i1 false)
149 ; Don't crash on out-of-bounds length.
151 define void @PR50910() {
152 ; CHECK-LABEL: @PR50910(
153 ; CHECK-NEXT: [[T1:%.*]] = alloca i8, i64 1, align 8
154 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[T1]], i8 0, i64 1, i1 false)
155 ; CHECK-NEXT: ret void
; The memset length (4294967296 = 2^32) is far larger than the 1-byte alloca;
; SROA must tolerate the out-of-bounds length without crashing. The CHECK
; lines show the length being clamped to the alloca's size of 1.
157 %t1 = alloca i8, i64 1, align 8
158 call void @llvm.memset.p0i8.i64(i8* align 8 %t1, i8 0, i64 4294967296, i1 false)