; RUN: llc -march=hexagon -O2 -disable-hexagon-shuffle=1 < %s | FileCheck %s
; CHECK: vmemu(r{{[0-9]+}}) = v{{[0-9]*}};
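; The loop below stores each result vector through an unaligned pointer via
; inline asm; the CHECK line above verifies that the vmemu store survives
; instruction selection with the packet shuffler disabled.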

target triple = "hexagon"

; Function Attrs: nounwind
define void @f0(i8* %a0, i32 %a1, i8* %a2, i32 %a3, i8* %a4) #0 {
b0:
  %v0 = alloca i8*, align 4
  %v1 = alloca i32, align 4
  %v2 = alloca i8*, align 4
  %v3 = alloca i32, align 4
  %v4 = alloca i8*, align 4
  %v5 = alloca i32, align 4
  %v6 = alloca i32, align 4
  %v7 = alloca i32, align 4
  %v8 = alloca i32, align 4
  %v9 = alloca i32, align 4
  %v10 = alloca <16 x i32>, align 64
  %v11 = alloca <16 x i32>, align 64
  %v12 = alloca <16 x i32>, align 64
  %v13 = alloca <16 x i32>, align 64
  %v14 = alloca <16 x i32>, align 64
  %v15 = alloca <16 x i32>, align 64
  %v16 = alloca <16 x i32>, align 64
  %v17 = alloca <16 x i32>, align 64
  %v18 = alloca <16 x i32>, align 64
  %v19 = alloca <16 x i32>, align 64
  %v20 = alloca <16 x i32>, align 64
  store i8* %a0, i8** %v0, align 4
  store i32 %a1, i32* %v1, align 4
  store i8* %a2, i8** %v2, align 4
  store i32 %a3, i32* %v3, align 4
  store i8* %a4, i8** %v4, align 4
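  ; Unpack the 16-bit halfwords packed into %a1 and %a3: the low half of %a1
  ; is the source row stride, the high half of %a3 seeds the loop counter,
  ; and the low half of %a3 is the per-iteration decrement.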
  %v21 = load i32, i32* %v1, align 4
  %v22 = ashr i32 %v21, 16
  %v23 = and i32 65535, %v22
  store i32 %v23, i32* %v8, align 4
  %v24 = load i32, i32* %v1, align 4
  %v25 = and i32 65535, %v24
  store i32 %v25, i32* %v5, align 4
  %v26 = load i32, i32* %v3, align 4
  %v27 = and i32 65535, %v26
  store i32 %v27, i32* %v6, align 4
  %v28 = load i32, i32* %v3, align 4
  %v29 = ashr i32 %v28, 16
  %v30 = and i32 65535, %v29
  store i32 %v30, i32* %v9, align 4
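  ; Load the two 64-byte delta-control vectors from %a4 (offsets 0 and 64);
  ; they drive the vrdelta rearrangements in the loop body.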
  %v31 = load i8*, i8** %v4, align 4
  %v32 = bitcast i8* %v31 to <16 x i32>*
  %v33 = load <16 x i32>, <16 x i32>* %v32, align 64
  store <16 x i32> %v33, <16 x i32>* %v10, align 64
  %v34 = load i8*, i8** %v4, align 4
  %v35 = getelementptr inbounds i8, i8* %v34, i32 64
  %v36 = bitcast i8* %v35 to <16 x i32>*
  %v37 = load <16 x i32>, <16 x i32>* %v36, align 64
  store <16 x i32> %v37, <16 x i32>* %v12, align 64
  %v38 = load i32, i32* %v9, align 4
  store i32 %v38, i32* %v7, align 4
  br label %b1
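
; Loop header: iterate while the counter in %v7 is non-negative; %v7 is
; decremented by the step in %v6 at the bottom of the loop (%b3).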
b1:                                               ; preds = %b3, %b0
  %v39 = load i32, i32* %v7, align 4
  %v40 = icmp sge i32 %v39, 0
  br i1 %v40, label %b2, label %b4

b2:                                               ; preds = %b1
  %v41 = load i8*, i8** %v0, align 4
  %v42 = bitcast i8* %v41 to <16 x i32>*
  %v43 = load <16 x i32>, <16 x i32>* %v42, align 4
  store <16 x i32> %v43, <16 x i32>* %v14, align 64
  %v44 = load i32, i32* %v5, align 4
  %v45 = load i8*, i8** %v0, align 4
  %v46 = getelementptr inbounds i8, i8* %v45, i32 %v44
  store i8* %v46, i8** %v0, align 4
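  ; Rearrange the input row with each delta vector, then take the unsigned
  ; byte-wise average of the two rearranged results.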
  %v47 = load <16 x i32>, <16 x i32>* %v14, align 64
  %v48 = load <16 x i32>, <16 x i32>* %v10, align 64
  %v49 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %v47, <16 x i32> %v48)
  store <16 x i32> %v49, <16 x i32>* %v15, align 64
  %v50 = load <16 x i32>, <16 x i32>* %v14, align 64
  %v51 = load <16 x i32>, <16 x i32>* %v12, align 64
  %v52 = call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %v50, <16 x i32> %v51)
  store <16 x i32> %v52, <16 x i32>* %v17, align 64
  %v53 = load <16 x i32>, <16 x i32>* %v15, align 64
  %v54 = load <16 x i32>, <16 x i32>* %v17, align 64
  %v55 = call <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32> %v53, <16 x i32> %v54)
  store <16 x i32> %v55, <16 x i32>* %v19, align 64
  %v56 = load i8*, i8** %v2, align 4
  %v57 = load <16 x i32>, <16 x i32>* %v19, align 64
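  ; Store the averaged vector through the unaligned output pointer; this is
  ; the vmemu instruction the CHECK line matches.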
  call void asm sideeffect " vmemu($0) = $1;\0A", "r,v,~{memory}"(i8* %v56, <16 x i32> %v57) #2, !srcloc !0
  br label %b3

b3:                                               ; preds = %b2
  %v58 = load i32, i32* %v6, align 4
  %v59 = load i32, i32* %v7, align 4
  %v60 = sub nsw i32 %v59, %v58
  store i32 %v60, i32* %v7, align 4
  br label %b1

b4:                                               ; preds = %b1
  ret void
}

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32>, <16 x i32>) #1

; Function Attrs: nounwind readnone
declare <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32>, <16 x i32>) #1
attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
attributes #1 = { nounwind }
attributes #2 = { nounwind }

; Operand for the !srcloc on the inline asm call above; the original
; source-location integer was lost, so 0 stands in as a placeholder.
!0 = !{i32 0}