1 ; RUN: llc -mtriple=thumbv7k-apple-watchos2.0 -o - %s | FileCheck %s
3 %struct = type { i8, i64, i8, double, i8, <2 x float>, i8, <4 x float> }
; Each i8 member forces padding before the following field, so the field
; offsets in this struct expose the ABI alignment of i64, double,
; <2 x float> and <4 x float> on the watchOS (v7k) target.
5 define i32 @test_i64_align() "frame-pointer"="all" {
6 ; CHECK-LABEL: test_i64_align:
; Returns offsetof(%struct, field 1) -- the i64 member -- via a GEP from a
; null base pointer, which the compiler constant-folds to an immediate.
8 ret i32 ptrtoint(ptr getelementptr(%struct, ptr null, i32 0, i32 1) to i32)
11 define i32 @test_f64_align() "frame-pointer"="all" {
12 ; CHECK-LABEL: test_f64_align:
; Returns offsetof(%struct, field 3) -- the double member -- computed by
; constant-folding a GEP from a null base pointer.
14 ret i32 ptrtoint(ptr getelementptr(%struct, ptr null, i32 0, i32 3) to i32)
17 define i32 @test_v2f32_align() "frame-pointer"="all" {
18 ; CHECK-LABEL: test_v2f32_align:
; Returns offsetof(%struct, field 5) -- the <2 x float> member -- computed
; by constant-folding a GEP from a null base pointer.
20 ret i32 ptrtoint(ptr getelementptr(%struct, ptr null, i32 0, i32 5) to i32)
23 define i32 @test_v4f32_align() "frame-pointer"="all" {
24 ; CHECK-LABEL: test_v4f32_align:
; Returns offsetof(%struct, field 7) -- the <4 x float> member -- computed
; by constant-folding a GEP from a null base pointer.
26 ret i32 ptrtoint(ptr getelementptr(%struct, ptr null, i32 0, i32 7) to i32)
29 ; Key point here is that an extra register has to be saved so that the DPRs end
30 ; up in an aligned location (as prologue/epilogue inserter had calculated).
31 define void @test_dpr_unwind_align() "frame-pointer"="all" {
32 ; CHECK-LABEL: test_dpr_unwind_align:
; The asm below only clobbers r6, yet r5 also appears in the push: it is
; saved purely as padding so that the subsequently vpush'd d8/d9 land at an
; 8-byte aligned address (see the comment above the function).
33 ; CHECK: push {r5, r6, r7, lr}
35 ; CHECK: vpush {d8, d9}
; CFI offsets for the DPR saves must match the aligned slots chosen above.
36 ; CHECK: .cfi_offset d9, -24
37 ; CHECK: .cfi_offset d8, -32
39 ; CHECK: bl _test_i64_align
41 ; CHECK: vpop {d8, d9}
43 ; CHECK: pop {r5, r6, r7, pc}
; Clobber one GPR and two DPRs so the prologue must save r6, d8 and d9.
45 call void asm sideeffect "", "~{r6},~{d8},~{d9}"()
; Make the function non-leaf so LR is saved and restored as pc.
48 call i32 @test_i64_align()
52 ; This time, there's no viable way to tack CS-registers onto the list: a real SP
53 ; adjustment needs to be performed to put d8 and d9 where they should be.
54 define void @test_dpr_unwind_align_manually() "frame-pointer"="all" {
55 ; CHECK-LABEL: test_dpr_unwind_align_manually:
; Every low callee-saved GPR (r4-r7) is already clobbered, so there is no
; spare register to tack onto the push list for padding; the DPR area must
; instead be aligned with an explicit SP adjustment (see comment above).
56 ; CHECK: push {r4, r5, r6, r7, lr}
58 ; CHECK: push.w {r8, r11}
60 ; CHECK: vpush {d8, d9}
; CFI offsets reflect the d8/d9 slots below both GPR save areas.
61 ; CHECK: .cfi_offset d9, -40
62 ; CHECK: .cfi_offset d8, -48
64 ; CHECK: bl _test_i64_align
66 ; CHECK: vpop {d8, d9}
68 ; CHECK: pop.w {r8, r11}
69 ; CHECK: pop {r4, r5, r6, r7, pc}
; Clobber r4-r8 plus d8/d9 to force saves in both GPR areas and the DPR area.
71 call void asm sideeffect "", "~{r4},~{r5},~{r6},~{r7},~{r8},~{d8},~{d9}"()
; Make the function non-leaf so LR is saved and restored as pc.
74 call i32 @test_i64_align()
78 ; If there's only a CS1 area, the sub should be in the right place:
79 define void @test_dpr_unwind_align_just_cs1() "frame-pointer"="all" {
80 ; CHECK-LABEL: test_dpr_unwind_align_just_cs1:
; Only the CS1 area (r4-r7, lr) is used here; the check is that the aligning
; SP adjustment for d8/d9 is placed correctly relative to that single area.
81 ; CHECK: push {r4, r5, r6, r7, lr}
83 ; CHECK: vpush {d8, d9}
84 ; CHECK: .cfi_offset d9, -32
85 ; CHECK: .cfi_offset d8, -40
88 ; CHECK: bl _test_i64_align
90 ; CHECK: vpop {d8, d9}
92 ; CHECK: pop {r4, r5, r6, r7, pc}
; Clobber r4-r7 and d8/d9: CS1 GPR saves plus DPR saves, but no CS2 regs.
94 call void asm sideeffect "", "~{r4},~{r5},~{r6},~{r7},~{d8},~{d9}"()
; Make the function non-leaf so LR is saved and restored as pc.
97 call i32 @test_i64_align()
101 ; If there are no DPRs, we shouldn't try to align the stack in stages anyway
102 define void @test_dpr_unwind_align_no_dprs() "frame-pointer"="all" {
103 ; CHECK-LABEL: test_dpr_unwind_align_no_dprs:
; No DPRs are clobbered, so there is no DPR save area to align and the
; prologue should be a single plain push (no staged stack adjustment).
104 ; CHECK: push {r4, r5, r6, r7, lr}
107 ; CHECK: bl _test_i64_align
109 ; CHECK: pop {r4, r5, r6, r7, pc}
; Clobber only GPRs r4-r7.
111 call void asm sideeffect "", "~{r4},~{r5},~{r6},~{r7}"()
; Make the function non-leaf so LR is saved and restored as pc.
114 call i32 @test_i64_align()
118 ; 128-bit vectors should use 128-bit (i.e. correctly aligned) slots on the stack.
120 define <4 x float> @test_v128_stack_pass([8 x double], float, <4 x float> %in) "frame-pointer"="all" {
121 ; CHECK-LABEL: test_v128_stack_pass:
; The leading [8 x double] and float arguments are there to force %in to be
; passed on the stack; the :128 annotation on the load verifies its slot is
; 16-byte aligned (at sp+16 after the prologue).
122 ; CHECK: add r[[ADDR:[0-9]+]], sp, #16
123 ; CHECK: vld1.64 {d0, d1}, [r[[ADDR]]:128]
128 declare void @varargs(i32, ...)
130 ; When varargs are enabled, we go down a different route. Still want 128-bit alignment, though.
132 define void @test_v128_stack_pass_varargs(<4 x float> %in) "frame-pointer"="all" {
133 ; CHECK-LABEL: test_v128_stack_pass_varargs:
; For the variadic callee the <4 x float> argument is stored to the outgoing
; argument area; the :128 annotation verifies the store target (sp+16) is a
; 16-byte aligned slot.
134 ; CHECK: add r[[ADDR:[0-9]+]], sp, #16
135 ; CHECK: vst1.64 {d0, d1}, [r[[ADDR]]:128]
; The [3 x i32] and float fillers position %in at an aligned stack offset.
137 call void(i32, ...) @varargs(i32 undef, [3 x i32] undef, float undef, <4 x float> %in)
141 ; To be compatible with AAPCS's va_start model (store r0-r3 at incoming SP, give
142 ; a single pointer), 64-bit quantities must be passed in even/odd register pairs
; (or 8-byte aligned stack slots).
143 define i64 @test_64bit_gpr_align(i32, i64 %r2_r3, i32 %sp) "frame-pointer"="all" {
; As the parameter names indicate: the first i32 takes r0, the i64 must then
; occupy the aligned pair r2+r3 (skipping r1), and %sp overflows onto the
; stack -- hence the ldr from [sp] below.
144 ; CHECK-LABEL: test_64bit_gpr_align:
145 ; CHECK: ldr [[RHS:r[0-9]+]], [sp]
; 64-bit add of the zero-extended stack argument: low halves with adds, high
; half via adc absorbing the carry (the zext's high word is 0).
146 ; CHECK: adds r0, [[RHS]], r2
147 ; CHECK: adc r1, r3, #0
149 %ext = zext i32 %sp to i64
150 %sum = add i64 %ext, %r2_r3
; NOTE(review): this function continues past the visible chunk (it should
; return %sum) -- confirm against the full file.