1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc -mtriple=aarch64 -mattr=+fp16fml -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
4 ; This tests that the fmlal/fmlal2 instructions only accept lo registers for
5 ; the index operand, using inline asm to force the available registers.
; test: the inline asm clobbers q0-q7 (and returns its result through a GPR
; via the "=r" constraint), so the loaded/splatted vectors must be allocated
; to v8/v16. The by-element fmlal/fmlal2 below must then encode v8 as the
; indexed operand — a lo register (the file header says the indexed operand
; only accepts lo registers), which is what the assertions pin down.
7 define <4 x float> @test(ptr %lhs_panel, ptr %rhs_panel, <4 x float> %a) {
9 ; CHECK: // %bb.0: // %entry
10 ; CHECK-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
11 ; CHECK-NEXT: .cfi_def_cfa_offset 16
12 ; CHECK-NEXT: .cfi_offset b8, -16
13 ; CHECK-NEXT: fmov x8, d0
14 ; CHECK-NEXT: ldr q8, [x0]
15 ; CHECK-NEXT: ldr q16, [x1]
16 ; CHECK-NEXT: lsr x9, x8, #32
19 ; CHECK-NEXT: //NO_APP
20 ; CHECK-NEXT: mov w8, w8
21 ; CHECK-NEXT: orr x8, x8, x9, lsl #32
22 ; CHECK-NEXT: fmov d0, x8
23 ; CHECK-NEXT: fmlal v0.4s, v16.4h, v8.h[0]
24 ; CHECK-NEXT: mov v1.16b, v0.16b
25 ; CHECK-NEXT: fmlal2 v1.4s, v16.4h, v8.h[0]
26 ; CHECK-NEXT: fadd v0.4s, v0.4s, v1.4s
27 ; CHECK-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
; The lane-0 splat of the lhs load is what lets instruction selection pick
; the indexed (by-element) fmlal forms instead of the vector-vector forms.
30 %0 = load <8 x half>, ptr %lhs_panel, align 2
31 %1 = load <8 x half>, ptr %rhs_panel, align 2
32 %vecinit91 = shufflevector <8 x half> %0, <8 x half> undef, <8 x i32> zeroinitializer
; sideeffect asm with ~{q0}..~{q7} clobbers removes the low vector
; registers from the allocator's choices for the values live across it.
33 %b = call <4 x float> asm sideeffect "nop", "=r,r,~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7}"(<4 x float> %a) nounwind
34 %vfmlal_low3.i = tail call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %b, <8 x half> %1, <8 x half> %vecinit91)
35 %vfmlal_high3.i = tail call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %vfmlal_low3.i, <8 x half> %1, <8 x half> %vecinit91)
36 %z = fadd <4 x float> %vfmlal_low3.i, %vfmlal_high3.i
; loop: two accumulators start at zero and are updated by fmlal/fmlal2 each
; iteration; the assertions pin the in-loop by-element form to a lo index
; register (v2) and the final stores to a single stp of both accumulators.
40 define void @loop(ptr %out_tile, ptr %lhs_panel, ptr %rhs_panel, i32 noundef %K, i32 noundef %flags) {
42 ; CHECK: // %bb.0: // %entry
43 ; CHECK-NEXT: movi v0.2d, #0000000000000000
44 ; CHECK-NEXT: movi v1.2d, #0000000000000000
45 ; CHECK-NEXT: mov w8, w3
46 ; CHECK-NEXT: .LBB1_1: // %for.body
47 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
48 ; CHECK-NEXT: ldr q2, [x1], #2
49 ; CHECK-NEXT: subs x8, x8, #1
50 ; CHECK-NEXT: ldr q3, [x2], #2
51 ; CHECK-NEXT: fmlal v0.4s, v3.4h, v2.h[0]
52 ; CHECK-NEXT: fmlal2 v1.4s, v3.4h, v2.h[0]
53 ; CHECK-NEXT: b.ne .LBB1_1
54 ; CHECK-NEXT: // %bb.2: // %for.cond.cleanup
55 ; CHECK-NEXT: stp q0, q1, [x0]
; %K is the (unsigned) trip count of the half-element-strided loop below.
58 %wide.trip.count = zext i32 %K to i64
; After the loop, both accumulators are stored back-to-back at out_tile.
61 for.cond.cleanup: ; preds = %for.body
62 store <4 x float> %vfmlal_low3.i, ptr %out_tile, align 4
63 %add.ptr1399 = getelementptr inbounds float, ptr %out_tile, i64 4
64 store <4 x float> %vfmlal_high3.i, ptr %add.ptr1399, align 4
; Each iteration reloads lhs/rhs at a half-element offset and re-splats
; lane 0 of the lhs load, feeding the indexed fmlal/fmlal2 forms.
67 for.body: ; preds = %entry, %for.body
68 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
69 %acc0.01714 = phi <4 x float> [ zeroinitializer, %entry ], [ %vfmlal_low3.i, %for.body ]
70 %acc1.01713 = phi <4 x float> [ zeroinitializer, %entry ], [ %vfmlal_high3.i, %for.body ]
71 %add.ptr = getelementptr inbounds half, ptr %lhs_panel, i64 %indvars.iv
72 %0 = load <8 x half>, ptr %add.ptr, align 2
73 %add.ptr19 = getelementptr inbounds half, ptr %rhs_panel, i64 %indvars.iv
74 %1 = load <8 x half>, ptr %add.ptr19, align 2
75 %vecinit93 = shufflevector <8 x half> %0, <8 x half> undef, <8 x i32> zeroinitializer
76 %vfmlal_low3.i = tail call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %acc0.01714, <8 x half> %1, <8 x half> %vecinit93)
77 %vfmlal_high3.i = tail call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %acc1.01713, <8 x half> %1, <8 x half> %vecinit93)
78 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
79 %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
80 br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
; sink: like @loop, but the lane-0 splat of %lhs is computed once in the
; entry block (loop-invariant). The assertions show the loop body loads only
; from rhs_panel and keeps reusing v0.h[0] directly as the lo index operand,
; rather than re-materializing the splatted value each iteration.
83 define void @sink(ptr %out_tile, ptr %lhs_panel, ptr %rhs_panel, i32 noundef %K, i32 noundef %flags, <8 x half> noundef %lhs) {
85 ; CHECK: // %bb.0: // %entry
86 ; CHECK-NEXT: movi v1.2d, #0000000000000000
87 ; CHECK-NEXT: movi v2.2d, #0000000000000000
88 ; CHECK-NEXT: mov w8, w3
89 ; CHECK-NEXT: .LBB2_1: // %for.body
90 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
91 ; CHECK-NEXT: ldr q3, [x2], #2
92 ; CHECK-NEXT: subs x8, x8, #1
93 ; CHECK-NEXT: fmlal v1.4s, v3.4h, v0.h[0]
94 ; CHECK-NEXT: fmlal2 v2.4s, v3.4h, v0.h[0]
95 ; CHECK-NEXT: b.ne .LBB2_1
96 ; CHECK-NEXT: // %bb.2: // %for.cond.cleanup
97 ; CHECK-NEXT: stp q1, q2, [x0]
; Loop-invariant splat of lane 0 of the incoming %lhs argument.
100 %vecinit89 = shufflevector <8 x half> %lhs, <8 x half> undef, <8 x i32> zeroinitializer
101 %wide.trip.count = zext i32 %K to i64
; After the loop, both accumulators are stored back-to-back at out_tile.
104 for.cond.cleanup: ; preds = %for.body
105 store <4 x float> %vfmlal_low3.i, ptr %out_tile, align 4
106 %add.ptr1395 = getelementptr inbounds float, ptr %out_tile, i64 4
107 store <4 x float> %vfmlal_high3.i, ptr %add.ptr1395, align 4
; The body touches only rhs_panel; the splatted lhs comes from entry.
110 for.body: ; preds = %entry, %for.body
111 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
112 %acc0.01702 = phi <4 x float> [ zeroinitializer, %entry ], [ %vfmlal_low3.i, %for.body ]
113 %acc1.01701 = phi <4 x float> [ zeroinitializer, %entry ], [ %vfmlal_high3.i, %for.body ]
114 %add.ptr = getelementptr inbounds half, ptr %rhs_panel, i64 %indvars.iv
115 %0 = load <8 x half>, ptr %add.ptr, align 2
116 %vfmlal_low3.i = tail call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %acc0.01702, <8 x half> %0, <8 x half> %vecinit89)
117 %vfmlal_high3.i = tail call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %acc1.01701, <8 x half> %0, <8 x half> %vecinit89)
118 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
119 %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
120 br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
; FP16 fused multiply-add-long intrinsics (+fp16fml): fmlal widens and
; accumulates the low halves of the half-precision sources, fmlal2 the
; high halves.
124 declare <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>) #2
125 declare <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float>, <8 x half>, <8 x half>) #2