; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare <2 x i32> @llvm.vp.add.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
declare <2 x i32> @llvm.vp.sub.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)

declare <2 x i8> @llvm.vp.add.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
declare <2 x i8> @llvm.vp.sub.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)

; Constant folding should just work.
define <2 x i32> @constant_vp_add(<2 x i1> %mask, i32 %evl) {
; CHECK-LABEL: @constant_vp_add(
; CHECK-NEXT:    [[Q:%.*]] = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> <i32 3, i32 3>, <2 x i32> <i32 7, i32 7>, <2 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT:    ret <2 x i32> [[Q]]
;
  %Q = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> <i32 3, i32 3>, <2 x i32> <i32 7, i32 7>, <2 x i1> %mask, i32 %evl)
  ret <2 x i32> %Q
}

; Simplifying pure VP intrinsic patterns.
define <2 x i32> @common_sub_operand(<2 x i32> %X, <2 x i32> %Y, <2 x i1> %mask, i32 %evl) {
; CHECK-LABEL: @common_sub_operand(
; CHECK-NEXT:    [[Z:%.*]] = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]], <2 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT:    [[Q:%.*]] = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> [[Z]], <2 x i32> [[Y]], <2 x i1> [[MASK]], i32 [[EVL]])
; CHECK-NEXT:    ret <2 x i32> [[Q]]
;
  ; %Z = sub i32 %X, %Y, vp(%mask, %evl)
  %Z = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %X, <2 x i32> %Y, <2 x i1> %mask, i32 %evl)
  ; %Q = add i32 %Z, %Y, vp(%mask, %evl)
  %Q = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %Z, <2 x i32> %Y, <2 x i1> %mask, i32 %evl)
  ret <2 x i32> %Q
}

; Mixing regular SIMD with vp intrinsics (vp add match root).
define <2 x i32> @common_sub_operand_vproot(<2 x i32> %X, <2 x i32> %Y, <2 x i1> %mask, i32 %evl) {
; CHECK-LABEL: @common_sub_operand_vproot(
; CHECK-NEXT:    [[Z:%.*]] = sub <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[Q:%.*]] = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> [[Z]], <2 x i32> [[Y]], <2 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT:    ret <2 x i32> [[Q]]
;
  %Z = sub <2 x i32> %X, %Y
  ; %Q = add i32 %Z, %Y, vp(%mask, %evl)
  %Q = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %Z, <2 x i32> %Y, <2 x i1> %mask, i32 %evl)
  ret <2 x i32> %Q
}

; Mixing regular SIMD with vp intrinsics (vp inside pattern, regular instruction root).
define <2 x i32> @common_sub_operand_vpinner(<2 x i32> %X, <2 x i32> %Y, <2 x i1> %mask, i32 %evl) {
; CHECK-LABEL: @common_sub_operand_vpinner(
; CHECK-NEXT:    [[Z:%.*]] = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]], <2 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT:    [[Q:%.*]] = add <2 x i32> [[Z]], [[Y]]
; CHECK-NEXT:    ret <2 x i32> [[Q]]
;
  ; %Z = sub i32 %X, %Y, vp(%mask, %evl)
  %Z = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %X, <2 x i32> %Y, <2 x i1> %mask, i32 %evl)
  %Q = add <2 x i32> %Z, %Y
  ret <2 x i32> %Q
}

define <2 x i32> @negated_operand(<2 x i32> %x, <2 x i1> %mask, i32 %evl) {
; CHECK-LABEL: @negated_operand(
; CHECK-NEXT:    [[NEGX:%.*]] = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> zeroinitializer, <2 x i32> [[X:%.*]], <2 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> [[NEGX]], <2 x i32> [[X]], <2 x i1> [[MASK]], i32 [[EVL]])
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  ; %negx = sub i32 0, %x
  %negx = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> zeroinitializer, <2 x i32> %x, <2 x i1> %mask, i32 %evl)
  ; %r = add i32 %negx, %x
  %r = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %negx, <2 x i32> %x, <2 x i1> %mask, i32 %evl)
  ret <2 x i32> %r
}

; TODO: Lift InstSimplify::SimplifyAdd to the trait framework to optimize this.
define <2 x i8> @knownnegation(<2 x i8> %x, <2 x i8> %y, <2 x i1> %mask, i32 %evl) {
; TODO-CHECK-LABEL: @knownnegation(
; TODO-CHECK-NEXT:    ret <2 x i8> zeroinitializer
;
; CHECK-LABEL: @knownnegation(
; CHECK-NEXT:    [[XY:%.*]] = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]], <2 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT:    [[YX:%.*]] = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> [[Y]], <2 x i8> [[X]], <2 x i1> [[MASK]], i32 [[EVL]])
; CHECK-NEXT:    [[R:%.*]] = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> [[XY]], <2 x i8> [[YX]], <2 x i1> [[MASK]], i32 [[EVL]])
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  ; %xy = sub i8 %x, %y
  %xy = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i1> %mask, i32 %evl)
  ; %yx = sub i8 %y, %x
  %yx = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %y, <2 x i8> %x, <2 x i1> %mask, i32 %evl)
  ; %r = add i8 %xy, %yx
  %r = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %xy, <2 x i8> %yx, <2 x i1> %mask, i32 %evl)
  ret <2 x i8> %r
}