; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
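
; Test v16i8 stores.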
define void @f1(<16 x i8> %val, <16 x i8> *%ptr) {
; CHECK-LABEL: f1:
; CHECK: vst %v24, 0(%r2), 3
  store <16 x i8> %val, <16 x i8> *%ptr
  ret void
}
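
; Test v8i16 stores.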
define void @f2(<8 x i16> %val, <8 x i16> *%ptr) {
; CHECK-LABEL: f2:
; CHECK: vst %v24, 0(%r2), 3
  store <8 x i16> %val, <8 x i16> *%ptr
  ret void
}
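
; Test v4i32 stores.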
define void @f3(<4 x i32> %val, <4 x i32> *%ptr) {
; CHECK-LABEL: f3:
; CHECK: vst %v24, 0(%r2), 3
  store <4 x i32> %val, <4 x i32> *%ptr
  ret void
}
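
; Test v2i64 stores.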
define void @f4(<2 x i64> %val, <2 x i64> *%ptr) {
; CHECK-LABEL: f4:
; CHECK: vst %v24, 0(%r2), 3
  store <2 x i64> %val, <2 x i64> *%ptr
  ret void
}
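
; Test v4f32 stores.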
define void @f5(<4 x float> %val, <4 x float> *%ptr) {
; CHECK-LABEL: f5:
; CHECK: vst %v24, 0(%r2), 3
  store <4 x float> %val, <4 x float> *%ptr
  ret void
}
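
; Test v2f64 stores.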
define void @f6(<2 x double> %val, <2 x double> *%ptr) {
; CHECK-LABEL: f6:
; CHECK: vst %v24, 0(%r2), 3
  store <2 x double> %val, <2 x double> *%ptr
  ret void
}

; Test the highest aligned in-range offset.
define void @f7(<16 x i8> %val, <16 x i8> *%base) {
; CHECK-LABEL: f7:
; CHECK: vst %v24, 4080(%r2), 3
  %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 255
  store <16 x i8> %val, <16 x i8> *%ptr
  ret void
}

; Test the highest unaligned in-range offset.
define void @f8(<16 x i8> %val, i8 *%base) {
; CHECK-LABEL: f8:
; CHECK: vst %v24, 4095(%r2)
  %addr = getelementptr i8, i8 *%base, i64 4095
  %ptr = bitcast i8 *%addr to <16 x i8> *
  store <16 x i8> %val, <16 x i8> *%ptr, align 1
  ret void
}

; Test the next offset up, which requires separate address logic.
define void @f9(<16 x i8> %val, <16 x i8> *%base) {
; CHECK-LABEL: f9:
; CHECK: aghi %r2, 4096
; CHECK: vst %v24, 0(%r2), 3
  %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 256
  store <16 x i8> %val, <16 x i8> *%ptr
  ret void
}

; Test negative offsets, which also require separate address logic.
define void @f10(<16 x i8> %val, <16 x i8> *%base) {
; CHECK-LABEL: f10:
; CHECK: aghi %r2, -16
; CHECK: vst %v24, 0(%r2), 3
  %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 -1
  store <16 x i8> %val, <16 x i8> *%ptr
  ret void
}

; Check that indexes are allowed.
define void @f11(<16 x i8> %val, i8 *%base, i64 %index) {
; CHECK-LABEL: f11:
; CHECK: vst %v24, 0(%r3,%r2)
  %addr = getelementptr i8, i8 *%base, i64 %index
  %ptr = bitcast i8 *%addr to <16 x i8> *
  store <16 x i8> %val, <16 x i8> *%ptr, align 1
  ret void
}
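
; Test v2i8 stores.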
define void @f12(<2 x i8> %val, <2 x i8> *%ptr) {
; CHECK-LABEL: f12:
; CHECK: vsteh %v24, 0(%r2), 0
  store <2 x i8> %val, <2 x i8> *%ptr
  ret void
}
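
; Test v4i8 stores.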
define void @f13(<4 x i8> %val, <4 x i8> *%ptr) {
; CHECK-LABEL: f13:
; CHECK: vstef %v24, 0(%r2), 0
  store <4 x i8> %val, <4 x i8> *%ptr
  ret void
}
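
; Test v8i8 stores.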
define void @f14(<8 x i8> %val, <8 x i8> *%ptr) {
; CHECK-LABEL: f14:
; CHECK: vsteg %v24, 0(%r2), 0
  store <8 x i8> %val, <8 x i8> *%ptr
  ret void
}
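
; Test v2i16 stores.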
define void @f15(<2 x i16> %val, <2 x i16> *%ptr) {
; CHECK-LABEL: f15:
; CHECK: vstef %v24, 0(%r2), 0
  store <2 x i16> %val, <2 x i16> *%ptr
  ret void
}
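
; Test v4i16 stores.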
define void @f16(<4 x i16> %val, <4 x i16> *%ptr) {
; CHECK-LABEL: f16:
; CHECK: vsteg %v24, 0(%r2), 0
  store <4 x i16> %val, <4 x i16> *%ptr
  ret void
}
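
; Test v2i32 stores.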
define void @f17(<2 x i32> %val, <2 x i32> *%ptr) {
; CHECK-LABEL: f17:
; CHECK: vsteg %v24, 0(%r2), 0
  store <2 x i32> %val, <2 x i32> *%ptr
  ret void
}
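
; Test v2f32 stores.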
define void @f18(<2 x float> %val, <2 x float> *%ptr) {
; CHECK-LABEL: f18:
; CHECK: vsteg %v24, 0(%r2), 0
  store <2 x float> %val, <2 x float> *%ptr
  ret void
}

; Test quadword-aligned stores.
define void @f19(<16 x i8> %val, <16 x i8> *%ptr) {
; CHECK-LABEL: f19:
; CHECK: vst %v24, 0(%r2), 4
  store <16 x i8> %val, <16 x i8> *%ptr, align 16
  ret void
}

; Test that the alignment hint for VST is emitted also when the CFG optimizer
; replaces two VSTs with just one that then carries two memoperands.
define void @f20() {
; CHECK-LABEL: f20:
; CHECK: vst %v0, 0(%r1), 3
bb:
  switch i32 undef, label %exit [
    i32 0, label %bb1
    i32 1, label %bb2
  ]

bb1:
  %C1 = call i64* @foo()
  %I1 = insertelement <2 x i64*> poison, i64* %C1, i64 0
  %S1 = shufflevector <2 x i64*> %I1, <2 x i64*> poison, <2 x i32> zeroinitializer
  store <2 x i64*> %S1, <2 x i64*>* undef, align 8
  br label %exit

bb2:
  %C2 = call i64* @foo()
  %I2 = insertelement <2 x i64*> poison, i64* %C2, i64 0
  %S2 = shufflevector <2 x i64*> %I2, <2 x i64*> poison, <2 x i32> zeroinitializer
  %U = bitcast i64** undef to <2 x i64*>*
  store <2 x i64*> %S2, <2 x i64*>* %U, align 8
  br label %exit

exit:
  ret void
}

declare i64* @foo()