1 ; RUN: llc -march=hexagon -hexagon-hvx-widen=32 < %s | FileCheck %s
3 ; If the "rx = #N, vsetq(rx)" sequence gets reordered with the rest, update the test.
7 ; CHECK: r[[R0:[0-9]+]] = #32
8 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
9 ; CHECK: v[[V1:[0-9]+]].b = vdeal(v[[V0]].b)
10 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
11 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
; f0: trunc <32 x i16> -> <32 x i8> (32 result bytes). The CHECK lines above
; expect the truncated lanes to be gathered with vdeal and stored through a
; vsetq(#32)-predicated vector store, so only the low 32 bytes are written.
12 define void @f0(<32 x i16>* %a0, <32 x i8>* %a1) #0 {
13 %v0 = load <32 x i16>, <32 x i16>* %a0, align 128
14 %v1 = trunc <32 x i16> %v0 to <32 x i8>
15 store <32 x i8> %v1, <32 x i8>* %a1, align 128
21 ; CHECK: r[[R0:[0-9]+]] = #32
22 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
23 ; CHECK: v[[V1:[0-9]+]].b = vdeale({{.*}},v[[V0]].b)
24 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
25 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
; f1: trunc <32 x i32> -> <32 x i8> (32 result bytes). Same masked-store shape
; as f0 but the i32->i8 narrowing is expected to use vdeale instead of vdeal.
26 define void @f1(<32 x i32>* %a0, <32 x i8>* %a1) #0 {
27 %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
28 %v1 = trunc <32 x i32> %v0 to <32 x i8>
29 store <32 x i8> %v1, <32 x i8>* %a1, align 128
35 ; CHECK: r[[R0:[0-9]+]] = #64
36 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
37 ; CHECK: v[[V1:[0-9]+]].b = vdeal(v[[V0]].b)
38 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
39 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
; f2: trunc <64 x i16> -> <64 x i8> (64 result bytes). As f0 but with twice
; the element count, so the store mask is vsetq(#64).
40 define void @f2(<64 x i16>* %a0, <64 x i8>* %a1) #0 {
41 %v0 = load <64 x i16>, <64 x i16>* %a0, align 128
42 %v1 = trunc <64 x i16> %v0 to <64 x i8>
43 store <64 x i8> %v1, <64 x i8>* %a1, align 128
49 ; CHECK-DAG: v[[V0:[0-9]+]] = vmem(r0+#0)
50 ; CHECK-DAG: v[[V1:[0-9]+]] = vmem(r0+#1)
51 ; CHECK-DAG: q[[Q0:[0-3]]] = vsetq
52 ; CHECK: v[[V2:[0-9]+]].h = vpacke(v[[V1]].w,v[[V0]].w)
53 ; CHECK: v[[V3:[0-9]+]].b = vpacke({{.*}},v[[V2]].h)
54 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V3]]
; f3: trunc <64 x i32> -> <64 x i8>. The source spans two vector registers
; (two vmem loads), so the narrowing is expected as a two-step vpacke chain
; (w->h, then h->b) before the predicated store of the 64 result bytes.
55 define void @f3(<64 x i32>* %a0, <64 x i8>* %a1) #0 {
56 %v0 = load <64 x i32>, <64 x i32>* %a0, align 128
57 %v1 = trunc <64 x i32> %v0 to <64 x i8>
58 store <64 x i8> %v1, <64 x i8>* %a1, align 128
64 ; CHECK: r[[R0:[0-9]+]] = #32
65 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
66 ; CHECK: v[[V1:[0-9]+]].h = vdeal(v[[V0]].h)
67 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
68 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
; f4: trunc <16 x i32> -> <16 x i16> (32 result bytes). Halfword variant of
; f0: vdeal on .h lanes, store masked with vsetq(#32).
69 define void @f4(<16 x i32>* %a0, <16 x i16>* %a1) #0 {
70 %v0 = load <16 x i32>, <16 x i32>* %a0, align 128
71 %v1 = trunc <16 x i32> %v0 to <16 x i16>
72 store <16 x i16> %v1, <16 x i16>* %a1, align 128
78 ; CHECK: r[[R0:[0-9]+]] = #64
79 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
80 ; CHECK: v[[V1:[0-9]+]].h = vdeal(v[[V0]].h)
81 ; CHECK: q[[Q0:[0-3]]] = vsetq(r[[R0]])
82 ; CHECK: if (q[[Q0]]) vmem(r1+#0) = v[[V1]]
; f5: trunc <32 x i32> -> <32 x i16> (64 result bytes). As f4 but with twice
; the element count, so the store mask is vsetq(#64).
83 define void @f5(<32 x i32>* %a0, <32 x i16>* %a1) #0 {
84 %v0 = load <32 x i32>, <32 x i32>* %a0, align 128
85 %v1 = trunc <32 x i32> %v0 to <32 x i16>
86 store <32 x i16> %v1, <32 x i16>* %a1, align 128
92 ; CHECK: v[[V0:[0-9]+]] = vmem(r0+#0)
93 ; CHECK: v[[V1:[0-9]+]].b = vdeale({{.*}},v[[V0]].b)
94 ; CHECK: vmem(r[[R0:[0-9]+]]+#0) = v[[V1]]
95 ; CHECK-DAG: r[[R1:[0-9]+]] = memw(r[[R0]]+#0)
96 ; CHECK-DAG: r[[R2:[0-9]+]] = memw(r[[R0]]+#4)
97 ; CHECK: memd(r1+#0) = r[[R2]]:[[R1]]
; f6: trunc <8 x i32> -> <8 x i8>, only 8 result bytes. Too small for a
; predicated vector store: the CHECKs expect the narrowed vector to be stored
; to a temporary (vmem to r[[R0]]), reloaded as two scalar words, and written
; out with a single 8-byte memd store.
98 define void @f6(<8 x i32>* %a0, <8 x i8>* %a1) #0 {
99 %v0 = load <8 x i32>, <8 x i32>* %a0, align 128
100 %v1 = trunc <8 x i32> %v0 to <8 x i8>
101 store <8 x i8> %v1, <8 x i8>* %a1, align 128
; Target: HVX v65 with 128-byte vectors; "-packets" disables instruction
; packetization — presumably to keep the emitted instruction order stable for
; the CHECK lines above (see the header note about reordering).
106 attributes #0 = { "target-cpu"="hexagonv65" "target-features"="+hvx,+hvx-length128b,-packets" }