; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes='sroa<preserve-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-PRESERVE-CFG
; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG

; This test checks that SROA runs mem2reg on scalable vectors.
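; For the promotable cases below, the alloca/store/load sequence should fold
; away, leaving just a return of the incoming argument.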

define <vscale x 16 x i1> @alloca_nxv16i1(<vscale x 16 x i1> %pg) {
; CHECK-LABEL: @alloca_nxv16i1(
; CHECK-NEXT:    ret <vscale x 16 x i1> [[PG:%.*]]
;
  %pg.addr = alloca <vscale x 16 x i1>
  store <vscale x 16 x i1> %pg, ptr %pg.addr
  %1 = load <vscale x 16 x i1>, ptr %pg.addr
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i8> @alloca_nxv16i8(<vscale x 16 x i8> %vec) {
; CHECK-LABEL: @alloca_nxv16i8(
; CHECK-NEXT:    ret <vscale x 16 x i8> [[VEC:%.*]]
;
  %vec.addr = alloca <vscale x 16 x i8>
  store <vscale x 16 x i8> %vec, ptr %vec.addr
  %1 = load <vscale x 16 x i8>, ptr %vec.addr
  ret <vscale x 16 x i8> %1
}

; Test scalable alloca that can't be promoted. Mem2Reg only considers
; non-volatile loads and stores for promotion.
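; The alloca and its volatile accesses must therefore survive the pass.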
define <vscale x 16 x i8> @unpromotable_alloca(<vscale x 16 x i8> %vec) {
; CHECK-LABEL: @unpromotable_alloca(
; CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 16 x i8>, align 16
; CHECK-NEXT:    store volatile <vscale x 16 x i8> [[VEC:%.*]], ptr [[VEC_ADDR]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load volatile <vscale x 16 x i8>, ptr [[VEC_ADDR]], align 16
; CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
;
  %vec.addr = alloca <vscale x 16 x i8>
  store volatile <vscale x 16 x i8> %vec, ptr %vec.addr
  %1 = load volatile <vscale x 16 x i8>, ptr %vec.addr
  ret <vscale x 16 x i8> %1
}

; Test we bail out when using an alloca of a fixed-length vector (VLS) that was
; bitcasted to a scalable vector.
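; Both allocas and all of the mixed fixed/scalable loads and stores should be
; left untouched.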
define <vscale x 4 x i32> @cast_alloca_to_svint32_t(<vscale x 4 x i32> %type.coerce) {
; CHECK-LABEL: @cast_alloca_to_svint32_t(
; CHECK-NEXT:    [[TYPE:%.*]] = alloca <16 x i32>, align 64
; CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 64
; CHECK-NEXT:    store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], ptr [[TYPE]], align 16
; CHECK-NEXT:    [[TYPE1:%.*]] = load <16 x i32>, ptr [[TYPE]], align 64
; CHECK-NEXT:    store <16 x i32> [[TYPE1]], ptr [[TYPE_ADDR]], align 64
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[TYPE_ADDR]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 4 x i32>, ptr [[TYPE_ADDR]], align 16
; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
;
  %type = alloca <16 x i32>
  %type.addr = alloca <16 x i32>
  store <vscale x 4 x i32> %type.coerce, ptr %type
  %type1 = load <16 x i32>, ptr %type
  store <16 x i32> %type1, ptr %type.addr
  %1 = load <16 x i32>, ptr %type.addr
  %2 = load <vscale x 4 x i32>, ptr %type.addr
  ret <vscale x 4 x i32> %2
}

; When casting from VLA to VLS via memory, check that we bail out when producing
; a GEP where the element type is a scalable vector.
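; %retval is never initialized, so the memcpy below should be rewritten into a
; store of undef through %retval.coerce.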
define <vscale x 4 x i32> @cast_alloca_from_svint32_t() {
; CHECK-LABEL: @cast_alloca_from_svint32_t(
; CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
; CHECK-NEXT:    store <16 x i32> undef, ptr [[RETVAL_COERCE]], align 16
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[RETVAL_COERCE]], align 16
; CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
;
  %retval = alloca <16 x i32>
  %retval.coerce = alloca <vscale x 4 x i32>
  call void @llvm.memcpy.p0.p0.i64(ptr align 16 %retval.coerce, ptr align 16 %retval, i64 64, i1 false)
  %1 = load <vscale x 4 x i32>, ptr %retval.coerce
  ret <vscale x 4 x i32> %1
}

; Test we bail out when using an alloca of a fixed-length vector (VLS) that was
; bitcasted to a scalable vector.
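; Here the scalable load goes through a select of the fixed-length alloca and
; null, so both the alloca and the select must remain.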
define void @select_load_alloca_to_svdouble_t() {
; CHECK-LABEL: @select_load_alloca_to_svdouble_t(
; CHECK-NEXT:    [[Z:%.*]] = alloca <16 x half>, align 32
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 0, 0
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], ptr [[Z]], ptr null
; CHECK-NEXT:    [[VAL:%.*]] = load <vscale x 2 x double>, ptr [[COND]], align 16
; CHECK-NEXT:    ret void
;
  %z = alloca <16 x half>
  %cmp = icmp eq i32 0, 0
  %cond = select i1 %cmp, ptr %z, ptr null
  %val = load <vscale x 2 x double>, ptr %cond, align 16
  ret void
}
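
; Same as above, but with a scalable store rather than a load through the
; select.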
define void @select_store_alloca_to_svdouble_t(<vscale x 2 x double> %val) {
; CHECK-LABEL: @select_store_alloca_to_svdouble_t(
; CHECK-NEXT:    [[Z:%.*]] = alloca <16 x half>, align 32
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 0, 0
; CHECK-NEXT:    [[COND:%.*]] = select i1 [[CMP]], ptr [[Z]], ptr null
; CHECK-NEXT:    store <vscale x 2 x double> [[VAL:%.*]], ptr [[COND]], align 16
; CHECK-NEXT:    ret void
;
  %z = alloca <16 x half>
  %cmp = icmp eq i32 0, 0
  %cond = select i1 %cmp, ptr %z, ptr null
  store <vscale x 2 x double> %val, ptr %cond, align 16
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}