; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-linux-gnu -mcpu=x86-64-v2 -passes=slp-vectorizer -S | FileCheck %s

; FIXME: Ensure llvm.powi.* intrinsics are vectorized.
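; A minimal sketch of the fold this test expects (value names illustrative):
; per-lane scalar calls such as
;   %e0 = extractelement <2 x double> %x, i64 0
;   %p0 = tail call fast double @llvm.powi.f64.i32(double %e0, i32 6)
; should collapse into a single vector intrinsic call:
;   %v = call fast <2 x double> @llvm.powi.v2f64.i32(<2 x double> %x, i32 6)
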
define <2 x double> @PR53887_v2f64(<2 x double> noundef %x) {
; CHECK-LABEL: @PR53887_v2f64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call fast <2 x double> @llvm.powi.v2f64.i32(<2 x double> [[X:%.*]], i32 6)
; CHECK-NEXT:    ret <2 x double> [[TMP0]]
;
entry:
  %vecext = extractelement <2 x double> %x, i64 0
  %0 = tail call fast double @llvm.powi.f64.i32(double %vecext, i32 6)
  %vecinit = insertelement <2 x double> undef, double %0, i64 0
  %vecext1 = extractelement <2 x double> %x, i64 1
  %1 = tail call fast double @llvm.powi.f64.i32(double %vecext1, i32 6)
  %vecinit3 = insertelement <2 x double> %vecinit, double %1, i64 1
  ret <2 x double> %vecinit3
}
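
; The same per-lane chain widened to four lanes; it should collapse to a
; single @llvm.powi.v4f64.i32 call, as the CHECK lines below assert.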
define <4 x double> @PR53887_v4f64(<4 x double> noundef %x) {
; CHECK-LABEL: @PR53887_v4f64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call fast <4 x double> @llvm.powi.v4f64.i32(<4 x double> [[X:%.*]], i32 6)
; CHECK-NEXT:    ret <4 x double> [[TMP0]]
;
entry:
  %vecext = extractelement <4 x double> %x, i64 0
  %0 = tail call fast double @llvm.powi.f64.i32(double %vecext, i32 6)
  %vecinit = insertelement <4 x double> undef, double %0, i64 0
  %vecext1 = extractelement <4 x double> %x, i64 1
  %1 = tail call fast double @llvm.powi.f64.i32(double %vecext1, i32 6)
  %vecinit3 = insertelement <4 x double> %vecinit, double %1, i64 1
  %vecext4 = extractelement <4 x double> %x, i64 2
  %2 = tail call fast double @llvm.powi.f64.i32(double %vecext4, i32 6)
  %vecinit6 = insertelement <4 x double> %vecinit3, double %2, i64 2
  %vecext7 = extractelement <4 x double> %x, i64 3
  %3 = tail call fast double @llvm.powi.f64.i32(double %vecext7, i32 6)
  %vecinit9 = insertelement <4 x double> %vecinit6, double %3, i64 3
  ret <4 x double> %vecinit9
}
declare double @llvm.powi.f64.i32(double, i32)
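
; If the RUN line or the IR above changes, the CHECK lines can be regenerated
; with the script named in the NOTE at the top of this file, invoked with
; something like (paths are illustrative):
;   llvm/utils/update_test_checks.py --opt-binary=<build>/bin/opt <this-file>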