; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
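
;; Codegen of integer AND on 256-bit LASX vectors: register-register forms
;; and forms where one operand is a splat of a small immediate.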

define void @and_v32i8(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = load <32 x i8>, ptr %a1
  %v2 = and <32 x i8> %v0, %v1
  store <32 x i8> %v2, ptr %res
  ret void
}

define void @and_v16i16(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = load <16 x i16>, ptr %a1
  %v2 = and <16 x i16> %v0, %v1
  store <16 x i16> %v2, ptr %res
  ret void
}

define void @and_v8i32(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = load <8 x i32>, ptr %a1
  %v2 = and <8 x i32> %v0, %v1
  store <8 x i32> %v2, ptr %res
  ret void
}

define void @and_v4i64(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: and_v4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = load <4 x i64>, ptr %a1
  %v2 = and <4 x i64> %v0, %v1
  store <4 x i64> %v2, ptr %res
  ret void
}
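
;; In the and_u_* cases below, one operand is a splat of the immediate 31.
;; Only the byte variant has a dedicated AND-with-immediate instruction
;; (xvandi.b, with an 8-bit unsigned immediate); for 16/32/64-bit elements
;; the splat is materialized with xvrepli.{h,w,d} and a register xvand.v
;; is used instead.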

define void @and_u_v32i8(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvandi.b $xr0, $xr0, 31
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <32 x i8>, ptr %a0
  %v1 = and <32 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
  store <32 x i8> %v1, ptr %res
  ret void
}

define void @and_u_v16i16(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvrepli.h $xr1, 31
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <16 x i16>, ptr %a0
  %v1 = and <16 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  store <16 x i16> %v1, ptr %res
  ret void
}

define void @and_u_v8i32(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvrepli.w $xr1, 31
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <8 x i32>, ptr %a0
  %v1 = and <8 x i32> %v0, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  store <8 x i32> %v1, ptr %res
  ret void
}

define void @and_u_v4i64(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: and_u_v4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvrepli.d $xr1, 31
; CHECK-NEXT:    xvand.v $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %v0 = load <4 x i64>, ptr %a0
  %v1 = and <4 x i64> %v0, <i64 31, i64 31, i64 31, i64 31>
  store <4 x i64> %v1, ptr %res
  ret void
}