; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+mve.fp,+fp64 -o - | FileCheck %s

target triple = "thumbv8.1m.main-none-none-eabi"
; Expected to not transform
define arm_aapcs_vfpcc <2 x double> @complex_add_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: complex_add_v2f64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vadd.f64 d3, d3, d0
; CHECK-NEXT:    vsub.f64 d2, d2, d1
; CHECK-NEXT:    vmov q0, q1
; CHECK-NEXT:    bx lr
entry:
  ; Deinterleave %a and %b into real (even-lane) and imaginary (odd-lane)
  ; halves, compute re = b.re - a.im and im = b.im + a.re (the rotated
  ; complex-add pattern), then re-interleave. The checks above confirm this
  ; is lowered to scalar vadd.f64/vsub.f64 rather than a complex instruction.
  %a.real = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <1 x i32> <i32 0>
  %a.imag = shufflevector <2 x double> %a, <2 x double> zeroinitializer, <1 x i32> <i32 1>
  %b.real = shufflevector <2 x double> %b, <2 x double> zeroinitializer, <1 x i32> <i32 0>
  %b.imag = shufflevector <2 x double> %b, <2 x double> zeroinitializer, <1 x i32> <i32 1>
  %0 = fsub fast <1 x double> %b.real, %a.imag
  %1 = fadd fast <1 x double> %b.imag, %a.real
  %interleaved.vec = shufflevector <1 x double> %0, <1 x double> %1, <2 x i32> <i32 0, i32 1>
  ret <2 x double> %interleaved.vec
}
; Expected to not transform
define arm_aapcs_vfpcc <4 x double> @complex_add_v4f64(<4 x double> %a, <4 x double> %b) {
; CHECK-LABEL: complex_add_v4f64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vadd.f64 d5, d5, d0
; CHECK-NEXT:    vsub.f64 d4, d4, d1
; CHECK-NEXT:    vadd.f64 d7, d7, d2
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    vsub.f64 d6, d6, d3
; CHECK-NEXT:    vmov q1, q3
; CHECK-NEXT:    bx lr
entry:
  ; Two complex f64 elements: deinterleave even/odd lanes of %a and %b,
  ; compute re = b.re - a.im and im = b.im + a.re per element, then
  ; re-interleave. As checked above, the lowering stays on scalar
  ; vadd.f64/vsub.f64 — the pattern is not combined into a complex op.
  %a.real = shufflevector <4 x double> %a, <4 x double> zeroinitializer, <2 x i32> <i32 0, i32 2>
  %a.imag = shufflevector <4 x double> %a, <4 x double> zeroinitializer, <2 x i32> <i32 1, i32 3>
  %b.real = shufflevector <4 x double> %b, <4 x double> zeroinitializer, <2 x i32> <i32 0, i32 2>
  %b.imag = shufflevector <4 x double> %b, <4 x double> zeroinitializer, <2 x i32> <i32 1, i32 3>
  %0 = fsub fast <2 x double> %b.real, %a.imag
  %1 = fadd fast <2 x double> %b.imag, %a.real
  %interleaved.vec = shufflevector <2 x double> %0, <2 x double> %1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
  ret <4 x double> %interleaved.vec
}
; Expected to not transform
define arm_aapcs_vfpcc <8 x double> @complex_add_v8f64(<8 x double> %a, <8 x double> %b) {
; CHECK-LABEL: complex_add_v8f64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    add r0, sp, #32
; CHECK-NEXT:    vmov q4, q1
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    add r0, sp, #48
; CHECK-NEXT:    vadd.f64 d1, d1, d2
; CHECK-NEXT:    vsub.f64 d0, d0, d3
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    add r0, sp, #64
; CHECK-NEXT:    vadd.f64 d3, d3, d8
; CHECK-NEXT:    vsub.f64 d2, d2, d9
; CHECK-NEXT:    vldrw.u32 q4, [r0]
; CHECK-NEXT:    add r0, sp, #80
; CHECK-NEXT:    vadd.f64 d9, d9, d4
; CHECK-NEXT:    vsub.f64 d8, d8, d5
; CHECK-NEXT:    vldrw.u32 q2, [r0]
; CHECK-NEXT:    vadd.f64 d11, d5, d6
; CHECK-NEXT:    vsub.f64 d10, d4, d7
; CHECK-NEXT:    vmov q2, q4
; CHECK-NEXT:    vmov q3, q5
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    bx lr
entry:
  ; Four complex f64 elements: %b no longer fits in registers, so the
  ; checks above load its halves from the stack (vldrw.u32 at sp+32..80).
  ; Same per-element computation as the smaller cases — re = b.re - a.im,
  ; im = b.im + a.re — still lowered to scalar vadd.f64/vsub.f64 pairs.
  %a.real = shufflevector <8 x double> %a, <8 x double> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %a.imag = shufflevector <8 x double> %a, <8 x double> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %b.real = shufflevector <8 x double> %b, <8 x double> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %b.imag = shufflevector <8 x double> %b, <8 x double> zeroinitializer, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %0 = fsub fast <4 x double> %b.real, %a.imag
  %1 = fadd fast <4 x double> %b.imag, %a.real
  %interleaved.vec = shufflevector <4 x double> %0, <4 x double> %1, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x double> %interleaved.vec
}