; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s
; When stack realignment is disabled via the "no-realign-stack" attribute,
; make sure we are not creating stack objects that are assumed to be
; 64-byte aligned.
@T3_retval = common global <16 x float> zeroinitializer, align 16
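
; A <16 x float> is 64 bytes, so the copy below is done with four 16-byte
; NEON transfers. The ":128" address qualifier on vld1/vst1 asserts only
; 128-bit (16-byte) alignment, which is all the align-16 slot can provide.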
define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
; CHECK-LABEL: test1
; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]]
; CHECK: mov r[[R2:[0-9]+]], r[[R1]]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]!
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: mov r[[R1:[0-9]+]], #32
; CHECK: mov r[[R2:[0-9]+]], sp
; CHECK: mov r[[R3:[0-9]+]], r[[R2]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128], r[[R1]]
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]
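; The temporary is addressed straight off sp ("mov r[[R2]], sp" above) and is
; only ever accessed with the 16-byte (:128) alignment qualifier.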
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}
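
; test2 performs the same copy without the "no-realign-stack" attribute; at
; -O0 the identical 16-byte-aligned sequence is expected.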
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
; CHECK-LABEL: test2
; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]]
; CHECK: mov r[[R2:[0-9]+]], r[[R1]]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]!
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R1:[0-9]+]], r[[R1]], #32
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: mov r[[R1:[0-9]+]], #32
; CHECK: mov r[[R2:[0-9]+]], sp
; CHECK: mov r[[R3:[0-9]+]], r[[R2]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128], r[[R1]]
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}