1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs -tail-predication=enabled -o - %s | FileCheck %s
; Fields as accessed by @foo below: field 0 = i32 (element count for the
; convert/scale loop), field 1 = i32 %ps_size, field 2 = ptr to the frame
; buffer (stored as f16), field 3 = ptr to inbuf (loaded as f16).
%struct.SpeexPreprocessState_ = type { i32, i32, ptr, ptr }
; void foo(SpeexPreprocessState_ *st, i16 *x)
;
; Two MVE do-while loops, 8 x f16 lanes per iteration, predicated via
; @llvm.arm.mve.vctp16 so the final partial iteration is masked:
;   Loop 1 (do.body):  copy %sub = 2*st->ps_size - st->field0 halfs from
;                      st->inbuf to st->frame (masked load + masked store).
;   Loop 2 (do.body6): convert st->field0 i16 elements of %x to f16,
;                      multiply by 0xH1800 (f16 2^-9 = 1/512), store at
;                      st->frame + %sub.
; The CHECK lines assert that -tail-predication=enabled lowers BOTH loops
; to dlstp.16/letp hardware loops with no explicit vctp/vpst left.
define void @foo(ptr nocapture readonly %st, ptr %x) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: ldrd r12, r2, [r0]
; CHECK-NEXT: ldrd r4, r3, [r0, #8]
; CHECK-NEXT: rsb r12, r12, r2, lsl #1
; CHECK-NEXT: dlstp.16 lr, r12
; CHECK-NEXT: .LBB0_1: @ %do.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r3], #16
; CHECK-NEXT: vstrh.16 q0, [r4], #16
; CHECK-NEXT: letp lr, .LBB0_1
; CHECK-NEXT: @ %bb.2: @ %do.end
; CHECK-NEXT: ldr r2, [r0]
; CHECK-NEXT: ldr r0, [r0, #8]
; CHECK-NEXT: add.w r0, r0, r12, lsl #1
; CHECK-NEXT: mov.w r3, #6144
; CHECK-NEXT: dlstp.16 lr, r2
; CHECK-NEXT: .LBB0_3: @ %do.body6
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r1], #16
; CHECK-NEXT: vcvt.f16.s16 q0, q0
; CHECK-NEXT: vmul.f16 q0, q0, r3
; CHECK-NEXT: vstrh.16 q0, [r0], #16
; CHECK-NEXT: letp lr, .LBB0_3
; CHECK-NEXT: @ %bb.4: @ %do.end13
; CHECK-NEXT: pop {r4, pc}
  ; --- entry: compute loop-1 element count and the two pointers ---
  %ps_size = getelementptr inbounds %struct.SpeexPreprocessState_, ptr %st, i32 0, i32 1
  %0 = load i32, ptr %ps_size, align 4   ; %0 = st->ps_size
  %mul = shl nsw i32 %0, 1               ; 2 * ps_size
  %1 = load i32, ptr %st, align 4        ; st->field0
  %sub = sub nsw i32 %mul, %1            ; elements to copy in loop 1
  %inbuf = getelementptr inbounds %struct.SpeexPreprocessState_, ptr %st, i32 0, i32 3
  %2 = load ptr, ptr %inbuf, align 4     ; src = st->inbuf
  %frame = getelementptr inbounds %struct.SpeexPreprocessState_, ptr %st, i32 0, i32 2
  %3 = load ptr, ptr %frame, align 4     ; dst = st->frame

; Loop 1: tail-predicated memcpy of %sub halfs, 8 per iteration.
; vctp16(%blkCnt.0) yields an all/partial lane mask for the last iteration.
do.body: ; preds = %do.body, %entry
  %pinbuff16.0 = phi ptr [ %2, %entry ], [ %add.ptr, %do.body ]
  %blkCnt.0 = phi i32 [ %sub, %entry ], [ %sub2, %do.body ]
  %pframef16.0 = phi ptr [ %3, %entry ], [ %add.ptr1, %do.body ]
  %4 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.0)
  %5 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0(ptr %pinbuff16.0, i32 2, <8 x i1> %4, <8 x half> zeroinitializer)
  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %5, ptr %pframef16.0, i32 2, <8 x i1> %4)
  %add.ptr = getelementptr inbounds half, ptr %pinbuff16.0, i32 8
  %add.ptr1 = getelementptr inbounds half, ptr %pframef16.0, i32 8
  %sub2 = add nsw i32 %blkCnt.0, -8      ; counter -= 8 (may go negative on exit)
  %cmp = icmp sgt i32 %blkCnt.0, 8       ; loop while more than one full vector remains
  br i1 %cmp, label %do.body, label %do.end

; Between the loops: reload frame/field0 (presumably not hoisted because the
; masked stores above may alias them — TODO confirm) and advance the
; destination past the %sub elements just written.
do.end: ; preds = %do.body
  %6 = load ptr, ptr %frame, align 4
  %add.ptr4 = getelementptr inbounds half, ptr %6, i32 %sub
  %7 = load i32, ptr %st, align 4        ; loop-2 element count = st->field0

; Loop 2: masked i16 load from %x, predicated s16->f16 convert, predicated
; multiply by splat(0xH1800) = 1/512, masked f16 store.
do.body6: ; preds = %do.body6, %do.end
  %px.0 = phi ptr [ %x, %do.end ], [ %add.ptr8, %do.body6 ]
  %blkCnt.1 = phi i32 [ %7, %do.end ], [ %sub10, %do.body6 ]
  %pframef16.1 = phi ptr [ %add.ptr4, %do.end ], [ %add.ptr9, %do.body6 ]
  %8 = tail call <8 x i1> @llvm.arm.mve.vctp16(i32 %blkCnt.1)
  %9 = tail call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %px.0, i32 2, <8 x i1> %8, <8 x i16> zeroinitializer)
  %10 = tail call fast <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16> %9, i32 0, <8 x i1> %8, <8 x half> undef)
  %11 = tail call fast <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half> %10, <8 x half> <half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800, half 0xH1800>, <8 x i1> %8, <8 x half> undef)
  tail call void @llvm.masked.store.v8f16.p0(<8 x half> %11, ptr %pframef16.1, i32 2, <8 x i1> %8)
  %add.ptr8 = getelementptr inbounds i16, ptr %px.0, i32 8
  %add.ptr9 = getelementptr inbounds half, ptr %pframef16.1, i32 8
  %sub10 = add nsw i32 %blkCnt.1, -8
  %cmp12 = icmp sgt i32 %blkCnt.1, 8
  br i1 %cmp12, label %do.body6, label %do.end13

do.end13: ; preds = %do.body6
; Intrinsic declarations for the MVE/masked operations used in @foo.
; vctp16 produces the per-lane tail predicate (lanes [0, n) active).
declare <8 x i1> @llvm.arm.mve.vctp16(i32)

; Generic masked vector load/store: (ptr, alignment, mask, passthru).
declare <8 x half> @llvm.masked.load.v8f16.p0(ptr, i32 immarg, <8 x i1>, <8 x half>)

declare void @llvm.masked.store.v8f16.p0(<8 x half>, ptr, i32 immarg, <8 x i1>)

declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)

; Predicated int->fp convert: (src, unsigned-flag, mask, inactive-lanes value).
declare <8 x half> @llvm.arm.mve.vcvt.fp.int.predicated.v8f16.v8i16.v8i1(<8 x i16>, i32, <8 x i1>, <8 x half>)

; Predicated f16 multiply: (a, b, mask, inactive-lanes value).
declare <8 x half> @llvm.arm.mve.mul.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>, <8 x half>)