1 ; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s
; When stack realignment is disabled (via the "no-realign-stack" function
; attribute), make sure we are not creating stack objects that are assumed
; to be 64-byte aligned.
6 @T3_retval = common global <16 x float> zeroinitializer, align 16
; test1: copy a <16 x float> (64 bytes) from the global @T3_retval through a
; 16-byte-aligned alloca into the sret out-parameter. The function carries the
; "no-realign-stack" attribute, so the backend must not realign sp and must not
; create stack objects assumed to be more aligned than the ABI guarantees.
; The CHECK lines below pin the expected lowering: plain adds off sp/base for
; the 16/32/48-byte sub-vector offsets, and NEON vld1/vst1 with a ":128"
; address-alignment hint (128-bit), never a 64-byte-alignment assumption.
define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]]
; CHECK: mov r[[R2:[0-9]+]], r[[R1]]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]!
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R3:[0-9]+]], r[[R1]], #32
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: add r[[R3:[0-9]+]], r[[R1]], #48
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: mov r[[R2:[0-9]+]], sp
; CHECK: add r[[R3:[0-9]+]], r[[R2]], #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: add r[[R4:[0-9]+]], r[[R2]], #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R4]]:128]
; CHECK: mov r[[R5:[0-9]+]], r[[R2]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R5]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R5]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R5]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R4]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]
  ; Round-trip through a local alloca so a stack object is actually created.
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ; NOTE(review): the function terminator (ret void) and closing brace are
  ; not visible in this chunk of the file.
; test2: same <16 x float> round-trip as test1, but WITHOUT the
; "no-realign-stack" attribute, so the backend is allowed to realign the
; stack for the 16-byte-aligned alloca. The CHECK lines pin the expected
; realigned lowering — note the "orr" off sp (only legal because sp is known
; realigned here) and the post-increment vst1 with a register stride, which
; differ from test1's add-only addressing.
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
; CHECK: ldr r[[R1:[0-9]+]], [pc, r[[R1]]]
; CHECK: add r[[R2:[0-9]+]], r[[R1]], #48
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: add r[[R2:[0-9]+]], r[[R1]], #32
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]!
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: mov r[[R1:[0-9]+]], sp
; CHECK: orr r[[R2:[0-9]+]], r[[R1]], #16
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: mov r[[R3:[0-9]+]], #32
; CHECK: mov r[[R9:[0-9]+]], r[[R1]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R9]]:128], r[[R3]]
; CHECK: mov r[[R3:[0-9]+]], r[[R9]]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R9]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R3]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R2]]:128]
; CHECK: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #48
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: add r[[R1:[0-9]+]], r0, #32
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r[[R1]]:128]
; CHECK: vst1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]!
; CHECK: vst1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128]
  ; Identical IR body to test1; only the attribute set differs.
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ; NOTE(review): the function terminator (ret void) and closing brace fall
  ; past the end of this visible chunk.