; RUN: llc -O0 -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -verify-machineinstrs %s -o %t.out 2> %t.err
; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
; RUN: not --crash llc -global-isel -mtriple aarch64_be %s -o - 2>&1 | FileCheck %s --check-prefix=BIG-ENDIAN
; This file checks that the fallback path to SelectionDAG works.
; The test is fragile in the sense that it must be updated to expose
; something that still fails with GlobalISel.
; When we can no longer produce such a test case, that means the fallback
; path can be removed.
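; With -global-isel-abort=2, GlobalISel reports the instruction it could not
; handle and falls back to SelectionDAG instead of erroring out;
; -pass-remarks-missed='gisel*' surfaces those reports as remarks on stderr
; (checked with FALLBACK-WITH-REPORT-ERR), while the code produced by the
; fallback path is checked on stdout (FALLBACK-WITH-REPORT-OUT).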
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--"

; BIG-ENDIAN: unable to translate in big endian mode
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %{{[0-9]+}}:_(<28 x s32>) = G_CONCAT_VECTORS %{{[0-9]+}}:_(<4 x s32>), %{{[0-9]+}}:_(<4 x s32>), %{{[0-9]+}}:_(<4 x s32>), %{{[0-9]+}}:_(<4 x s32>), %{{[0-9]+}}:_(<4 x s32>), %{{[0-9]+}}:_(<4 x s32>), %{{[0-9]+}}:_(<4 x s32>) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {
  %vec = load <7 x i32>, <7 x i32>* %addr
  store <7 x i32> %vec, <7 x i32>* %addr
  ret void
}
; Make sure we don't mess up metadata arguments.
declare void @llvm.write_register.i64(metadata, i64)

; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_WRITE_REGISTER !0, %0:_(s64) (in function: test_write_register_intrin)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_write_register_intrin
; FALLBACK-WITH-REPORT-OUT-LABEL: test_write_register_intrin:
define void @test_write_register_intrin() {
  call void @llvm.write_register.i64(metadata !{!"sp"}, i64 0)
  ret void
}
@_ZTIi = external global i8*
declare i32 @__gxx_personality_v0(...)
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(<2 x p0>) = G_INSERT_VECTOR_ELT %0:_, %{{[0-9]+}}:_(p0), %{{[0-9]+}}:_(s32) (in function: vector_of_pointers_insertelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
  br label %end

block:
  %dummy = insertelement <2 x i16*> %vec, i16* null, i32 0
  store <2 x i16*> %dummy, <2 x i16*>* undef
  ret void

end:
  %vec = load <2 x i16*>, <2 x i16*>* undef
  br label %block
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: RET_ReallyLR implicit $x0 (in function: strict_align_feature)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for strict_align_feature
; FALLBACK-WITH-REPORT-OUT-LABEL: strict_align_feature
define i64 @strict_align_feature(i64* %p) #0 {
  %x = load i64, i64* %p, align 1
  ret i64 %x
}

attributes #0 = { "target-features"="+strict-align" }
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for direct_mem
; FALLBACK-WITH-REPORT-OUT-LABEL: direct_mem
define void @direct_mem(i32 %x, i32 %y) {
entry:
  tail call void asm sideeffect "", "imr,imr,~{memory}"(i32 %x, i32 %y)
  ret void
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower function{{.*}}scalable_arg
; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_arg
define <vscale x 16 x i8> @scalable_arg(<vscale x 16 x i1> %pred, i8* %addr) #1 {
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
  ret <vscale x 16 x i8> %res
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to lower function{{.*}}scalable_ret
; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_ret
define <vscale x 16 x i8> @scalable_ret(i8* %addr) #1 {
  %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
  ret <vscale x 16 x i8> %res
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_call
; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_call
define i8 @scalable_call(i8* %addr) #1 {
  %pred = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 0)
  %vec = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
  %res = extractelement <vscale x 16 x i8> %vec, i32 0
  ret i8 %res
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_alloca
; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_alloca
define void @scalable_alloca() #1 {
  %local0 = alloca <vscale x 16 x i8>
  load volatile <vscale x 16 x i8>, <vscale x 16 x i8>* %local0
  ret void
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}asm_indirect_output
; FALLBACK-WITH-REPORT-OUT-LABEL: asm_indirect_output
define void @asm_indirect_output() {
entry:
  %ap = alloca i8*, align 8
  %0 = load i8*, i8** %ap, align 8
  call void asm sideeffect "", "=*r|m,0,~{memory}"(i8** %ap, i8* %0)
  ret void
}
%struct.foo = type { [8 x i64] }
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction:{{.*}}ld64b{{.*}}asm_output_ls64
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for asm_output_ls64
; FALLBACK-WITH-REPORT-OUT-LABEL: asm_output_ls64
define void @asm_output_ls64(%struct.foo* %output, i8* %addr) #2 {
entry:
  %val = call i512 asm sideeffect "ld64b $0,[$1]", "=r,r,~{memory}"(i8* %addr)
  %outcast = bitcast %struct.foo* %output to i512*
  store i512 %val, i512* %outcast, align 8
  ret void
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction:{{.*}}st64b{{.*}}asm_input_ls64
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for asm_input_ls64
; FALLBACK-WITH-REPORT-OUT-LABEL: asm_input_ls64
define void @asm_input_ls64(%struct.foo* %input, i8* %addr) #2 {
entry:
  %incast = bitcast %struct.foo* %input to i512*
  %val = load i512, i512* %incast, align 8
  call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 %val, i8* %addr)
  ret void
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %4:_(s128), %5:_(s1) = G_UMULO %0:_, %6:_ (in function: umul_s128)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for umul_s128
; FALLBACK-WITH-REPORT-OUT-LABEL: umul_s128
declare {i128, i1} @llvm.umul.with.overflow.i128(i128, i128) nounwind readnone
define zeroext i1 @umul_s128(i128 %v1, i128* %res) {
entry:
  %t = call {i128, i1} @llvm.umul.with.overflow.i128(i128 %v1, i128 2)
  %val = extractvalue {i128, i1} %t, 0
  %obit = extractvalue {i128, i1} %t, 1
  store i128 %val, i128* %res
  ret i1 %obit
}
attributes #1 = { "target-features"="+sve" }
attributes #2 = { "target-features"="+ls64" }

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)