; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-unknown | FileCheck %s

; This test is reduced from the TSVC evaluation of vectorizers:
; https://github.com/llvm/llvm-test-suite/commits/main/MultiSource/Benchmarks/TSVC/LoopRerolling-flt/tsc.c

; This test is currently vectorized with VF=2. We should be able to
; vectorize it with VF=4. Specifically, we should be able to have 1 load of
; <4 x float> instead of 2 loads of <2 x float>, and there should be no need
; for the shufflevector and insertelement instructions.
; The current issue comes from the left-hand-side fmul operands. These
; operands come from 4 loads that are not contiguous. The score estimation
; needs to be corrected so that these 4 loads are not selected for
; vectorization. Instead, we should vectorize with contiguous loads, from
; %a plus offsets 0 to 3, or offsets 1 to 4.
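;
; For reference, a minimal hand-written sketch (kept as a comment, not
; checked by FileCheck) of the VF=4 form we would like to see, assuming the
; "offsets 1 to 4" option; the value names here are illustrative only:
;
;   %v1   = load <4 x float>, ptr %gep1                          ; <ld1, ld2, ld3, ld4>
;   %ld0  = load float, ptr %a
;   %lhs0 = shufflevector <4 x float> %v1, <4 x float> poison, <4 x i32> <i32 poison, i32 1, i32 2, i32 3>
;   %lhs  = insertelement <4 x float> %lhs0, float %ld0, i32 0   ; <ld0, ld2, ld3, ld4>
;   %rhs  = shufflevector <4 x float> %v1, <4 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 2>
;   %mul  = fmul fast <4 x float> %lhs, %rhs                     ; %rhs is <ld1, ld1, ld2, ld3>
;   store <4 x float> %mul, ptr %a, align 4
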
define void @s116_modified(ptr %a) {
; CHECK-LABEL: @s116_modified(
; CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds float, ptr [[GEP1:%.*]], i64 2
; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds float, ptr [[GEP1]], i64 3
; CHECK-NEXT:    [[LD0:%.*]] = load float, ptr [[A]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[GEP1]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[GEP3]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x float> [[TMP4]], float [[LD0]], i32 1
; CHECK-NEXT:    [[TMP6:%.*]] = call <4 x float> @llvm.vector.insert.v4f32.v2f32(<4 x float> [[TMP5]], <2 x float> [[TMP2]], i64 2)
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP6]], <4 x i32> <i32 1, i32 1, i32 5, i32 6>
; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast <4 x float> [[TMP6]], [[TMP7]]
; CHECK-NEXT:    store <4 x float> [[TMP8]], ptr [[GEP1]], align 4
; CHECK-NEXT:    ret void
;
  %gep1 = getelementptr inbounds float, ptr %a, i64 1
  %gep2 = getelementptr inbounds float, ptr %a, i64 2
  %gep3 = getelementptr inbounds float, ptr %a, i64 3
  %gep4 = getelementptr inbounds float, ptr %a, i64 4
  %ld0 = load float, ptr %a
  %ld1 = load float, ptr %gep1
  %ld2 = load float, ptr %gep2
  %ld3 = load float, ptr %gep3
  %ld4 = load float, ptr %gep4
  ; LHS operands (ld0, ld2, ld3, ld4) are not contiguous in memory;
  ; RHS operands (ld1, ld1, ld2, ld3) can be built from a contiguous load.
  %mul0 = fmul fast float %ld0, %ld1
  %mul1 = fmul fast float %ld2, %ld1
  %mul2 = fmul fast float %ld3, %ld2
  %mul3 = fmul fast float %ld4, %ld3
  store float %mul0, ptr %a
  store float %mul1, ptr %gep1
  store float %mul2, ptr %gep2
  store float %mul3, ptr %gep3
  ret void
}