; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=apple -aarch64-enable-stp-suppress=false -verify-machineinstrs -asm-verbose=false | FileCheck %s
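
; The tests below check that the AArch64 load/store optimizer merges a scaled
; str/ldr with an adjacent unscaled stur/ldur into a single stp/ldp,
; regardless of which of the two instructions comes first.

; A scaled str of %v2 at [x0] plus an unscaled stur of %v1 at [x0, #-8]
; should become one stp of d0 and d1.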
; CHECK-LABEL: test_strd_sturd:
; CHECK-NEXT: stp d0, d1, [x0, #-8]
; CHECK-NEXT: ret
define void @test_strd_sturd(float* %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
  %tmp1 = bitcast float* %ptr to <2 x float>*
  store <2 x float> %v2, <2 x float>* %tmp1, align 16
  %add.ptr = getelementptr inbounds float, float* %ptr, i64 -2
  %tmp = bitcast float* %add.ptr to <2 x float>*
  store <2 x float> %v1, <2 x float>* %tmp, align 16
  ret void
}
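
; The same two stores in the opposite order should still pair.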
; CHECK-LABEL: test_sturd_strd:
; CHECK-NEXT: stp d0, d1, [x0, #-8]
; CHECK-NEXT: ret
define void @test_sturd_strd(float* %ptr, <2 x float> %v1, <2 x float> %v2) #0 {
  %add.ptr = getelementptr inbounds float, float* %ptr, i64 -2
  %tmp = bitcast float* %add.ptr to <2 x float>*
  store <2 x float> %v1, <2 x float>* %tmp, align 16
  %tmp1 = bitcast float* %ptr to <2 x float>*
  store <2 x float> %v2, <2 x float>* %tmp1, align 16
  ret void
}
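
; The q-register variant: two 16-byte stores at [x0] and [x0, #-16] should
; merge into a single stp of q0 and q1.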
; CHECK-LABEL: test_strq_sturq:
; CHECK-NEXT: stp q0, q1, [x0, #-16]
; CHECK-NEXT: ret
define void @test_strq_sturq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
  %tmp1 = bitcast double* %ptr to <2 x double>*
  store <2 x double> %v2, <2 x double>* %tmp1, align 16
  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
  %tmp = bitcast double* %add.ptr to <2 x double>*
  store <2 x double> %v1, <2 x double>* %tmp, align 16
  ret void
}
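
; As above, with the unscaled store issued first.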
; CHECK-LABEL: test_sturq_strq:
; CHECK-NEXT: stp q0, q1, [x0, #-16]
; CHECK-NEXT: ret
define void @test_sturq_strq(double* %ptr, <2 x double> %v1, <2 x double> %v2) #0 {
  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
  %tmp = bitcast double* %add.ptr to <2 x double>*
  store <2 x double> %v1, <2 x double>* %tmp, align 16
  %tmp1 = bitcast double* %ptr to <2 x double>*
  store <2 x double> %v2, <2 x double>* %tmp1, align 16
  ret void
}
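
; A scaled ldr at [x0] and an unscaled ldur at [x0, #-8] should merge into a
; single ldp.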
; CHECK-LABEL: test_ldrx_ldurx:
; CHECK-NEXT: ldp [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-8]
; CHECK-NEXT: add x0, [[V0]], [[V1]]
; CHECK-NEXT: ret
define i64 @test_ldrx_ldurx(i64* %p) #0 {
  %tmp = load i64, i64* %p, align 4
  %add.ptr = getelementptr inbounds i64, i64* %p, i64 -1
  %tmp1 = load i64, i64* %add.ptr, align 4
  %add = add nsw i64 %tmp1, %tmp
  ret i64 %add
}
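
; The same pair of loads in the opposite order.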
; CHECK-LABEL: test_ldurx_ldrx:
; CHECK-NEXT: ldp [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-8]
; CHECK-NEXT: add x0, [[V0]], [[V1]]
; CHECK-NEXT: ret
define i64 @test_ldurx_ldrx(i64* %p) #0 {
  %add.ptr = getelementptr inbounds i64, i64* %p, i64 -1
  %tmp1 = load i64, i64* %add.ptr, align 4
  %tmp = load i64, i64* %p, align 4
  %add = add nsw i64 %tmp1, %tmp
  ret i64 %add
}
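
; Sign-extending 32-bit loads (ldrsw/ldursw) should pair into ldpsw.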
; CHECK-LABEL: test_ldrsw_ldursw:
; CHECK-NEXT: ldpsw [[V0:x[0-9]+]], [[V1:x[0-9]+]], [x0, #-4]
; CHECK-NEXT: add x0, [[V0]], [[V1]]
; CHECK-NEXT: ret
define i64 @test_ldrsw_ldursw(i32* %p) #0 {
  %tmp = load i32, i32* %p, align 4
  %add.ptr = getelementptr inbounds i32, i32* %p, i64 -1
  %tmp1 = load i32, i32* %add.ptr, align 4
  %sexttmp = sext i32 %tmp to i64
  %sexttmp1 = sext i32 %tmp1 to i64
  %add = add nsw i64 %sexttmp1, %sexttmp
  ret i64 %add
}

; Also make sure we only match valid offsets: the two q-register loads below
; are 24 bytes apart, not the 16 an ldp of q registers requires, so they must
; stay separate.
; CHECK-LABEL: test_ldrq_ldruq_invalidoffset:
; CHECK-NEXT: ldr q[[V0:[0-9]+]], [x0]
; CHECK-NEXT: ldur q[[V1:[0-9]+]], [x0, #24]
; CHECK-NEXT: add.2d v0, v[[V0]], v[[V1]]
; CHECK-NEXT: ret
define <2 x i64> @test_ldrq_ldruq_invalidoffset(i64* %p) #0 {
  %a1 = bitcast i64* %p to <2 x i64>*
  %tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 3
  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
  %tmp2 = load <2 x i64>, <2 x i64>* %a2, align 8
  %add = add nsw <2 x i64> %tmp1, %tmp2
  ret <2 x i64> %add
}

; Pair an unscaled store with a scaled store where the scaled store has a
; non-zero offset. This should not hit an assert.
; CHECK-LABEL: test_stur_str_no_assert
; CHECK: stp xzr, xzr, [sp, #16]
; CHECK: ret
define void @test_stur_str_no_assert() #0 {
entry:
  %a1 = alloca i64, align 4
  %a2 = alloca [12 x i8], align 4
  %0 = bitcast i64* %a1 to i8*
  %C = getelementptr inbounds [12 x i8], [12 x i8]* %a2, i64 0, i64 4
  %1 = bitcast i8* %C to i64*
  store i64 0, i64* %1, align 4
  call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 8, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)

attributes #0 = { nounwind }