; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s

; fold (and (or x, C), D) -> D if (C & D) == D
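; Here C = 255 and D = 8: since 255 & 8 == 8, the and/or pair folds to a
; splat of 8, so only a single vmv.v.i should be emitted.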

define <vscale x 4 x i32> @and_or_nxv4i32(<vscale x 4 x i32> %A) {
; CHECK-LABEL: and_or_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 8
; CHECK-NEXT:    ret
  %v1 = or <vscale x 4 x i32> %A, splat (i32 255)
  %v2 = and <vscale x 4 x i32> %v1, splat (i32 8)
  ret <vscale x 4 x i32> %v2
}

; (or (and X, c1), c2) -> (and (or X, c2), c1|c2) iff (c1 & c2) != 0
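; Here c1 = 7 and c2 = 3 (7 & 3 != 0), so the expected output is vor.vi with 3
; followed by vand.vi with 7 (= 7 | 3).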

define <vscale x 2 x i64> @or_and_nxv2i64(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vor.vi v8, v8, 3
; CHECK-NEXT:    vand.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 7)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}

; If all masked bits are going to be set, that's a constant fold.
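; Here the or constant 3 covers the and mask 1, so the value of %a0 no longer
; matters and the result folds to a constant splat of 3 (a single vmv.v.i).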

define <vscale x 2 x i64> @or_and_nxv2i64_fold(<vscale x 2 x i64> %a0) {
; CHECK-LABEL: or_and_nxv2i64_fold:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    ret
  %v1 = and <vscale x 2 x i64> %a0, splat (i64 1)
  %v2 = or <vscale x 2 x i64> %v1, splat (i64 3)
  ret <vscale x 2 x i64> %v2
}

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
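; The shift amounts 2 and 4 combine into a single shift left by 6 (vsll.vi).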

define <vscale x 4 x i32> @combine_vec_shl_shl(<vscale x 4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_shl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = shl <vscale x 4 x i32> %x, splat (i32 2)
  %v2 = shl <vscale x 4 x i32> %v1, splat (i32 4)
  ret <vscale x 4 x i32> %v2
}

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
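; Likewise, the arithmetic shifts by 2 and by 4 combine into a single vsra.vi by 6.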

define <vscale x 2 x i32> @combine_vec_ashr_ashr(<vscale x 2 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_ashr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %v1 = ashr <vscale x 2 x i32> %x, splat (i32 2)
  %v2 = ashr <vscale x 2 x i32> %v1, splat (i32 4)
  ret <vscale x 2 x i32> %v2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
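; The two logical right shifts by 4 combine into a single vsrl.vi by 8.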

define <vscale x 8 x i16> @combine_vec_lshr_lshr(<vscale x 8 x i16> %x) {
; CHECK-LABEL: combine_vec_lshr_lshr:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %v1 = lshr <vscale x 8 x i16> %x, splat (i16 4)
  %v2 = lshr <vscale x 8 x i16> %v1, splat (i16 4)
  ret <vscale x 8 x i16> %v2
}

; fold (fmul x, 1.0) -> x
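; Multiplying by 1.0 is folded away, so no vector instructions should remain
; and the function body reduces to a bare ret.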
define <vscale x 2 x float> @combine_fmul_one(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> %x, splat (float 1.0)
  ret <vscale x 2 x float> %v
}

; fold (fmul 1.0, x) -> x
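; Same fold with the constant operand first; again only a ret is expected.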
define <vscale x 2 x float> @combine_fmul_one_commuted(<vscale x 2 x float> %x) {
; CHECK-LABEL: combine_fmul_one_commuted:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = fmul <vscale x 2 x float> splat (float 1.0), %x
  ret <vscale x 2 x float> %v
}