; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mattr=+sve2,+fp8,+fp8fma < %s | FileCheck %s
; RUN: llc -mattr=+sme,+fp8,+ssve-fp8fma --force-streaming < %s | FileCheck %s

target triple = "aarch64-linux"
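
; 2-way FP8 multiply-add long: products of 8-bit elements are widened and
; accumulated into half-precision lanes. FMLALB reads the even-numbered
; (bottom) byte elements, FMLALT the odd-numbered (top) ones.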
define <vscale x 8 x half> @fmla_2way_bot(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_bot:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalb z0.h, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalb.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @fmla_2way_top(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_top:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalt z0.h, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalt.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 8 x half> %r
}
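
; Indexed forms: the immediate selects a fixed byte lane of the second
; source as the multiplier (z2.b[3] below).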
define <vscale x 8 x half> @fmla_2way_bot_lane(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_bot_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalb z0.h, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalb.lane.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @fmla_2way_top_lane(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_2way_top_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalt z0.h, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.aarch64.sve.fp8.fmlalt.lane.nxv8f16(<vscale x 8 x half> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 8 x half> %r
}
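
; 4-way FP8 multiply-add long long: products of 8-bit elements are widened
; and accumulated into single-precision lanes. The BB/BT/TB/TT suffix
; selects which 8-bit element within each group of four is multiplied.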
define <vscale x 4 x float> @fmla_4way_bb(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bb:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbb z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbb.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_bt(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bt:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbt z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbt.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tb(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tb:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltb z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltb.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tt(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tt:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltt z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltt.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2)
  ret <vscale x 4 x float> %r
}
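
; Indexed 4-way forms, again multiplying by a fixed byte lane of the
; second source.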
define <vscale x 4 x float> @fmla_4way_bb_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bb_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbb z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbb.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_bt_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_bt_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlallbt z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlallbt.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tb_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tb_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltb z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltb.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @fmla_4way_tt_lane(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2) {
; CHECK-LABEL: fmla_4way_tt_lane:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmlalltt z0.s, z1.b, z2.b[3]
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.aarch64.sve.fp8.fmlalltt.lane.nxv4f32(<vscale x 4 x float> %a, <vscale x 16 x i8> %s1, <vscale x 16 x i8> %s2, i32 3)
  ret <vscale x 4 x float> %r
}