; REQUIRES: to-be-fixed
; This requires further patches.
; RUN: llc -march=hexagon < %s | FileCheck %s

; Split all andp/orp instructions (by boosting the profitability of their
; operands, which happen to be word masks).
; This should result in a loop with two packets, but we don't generate
; post-incremented loads, so we end up with 3 packets.

; CHECK: loop0([[LOOP:.LBB[0-9_]+]],

; Make sure that the 3rd packet only has an add in it.

; CHECK: r[[REG:[0-9]+]] = add(r[[REG]],#16)

; CHECK: }{{[ \t]*}}:endloop0
target triple = "hexagon"
; Two independent 64-bit accumulator chains (%v12/%v13) are updated per
; iteration with S4.vxsubaddhr, while S4.vxaddsubhr produces the running
; high-word results (%v14/%v15); the loop walks %a1 four i32 elements at a
; time. The entry label, the %v24 index, both block terminators and the
; closing brace were lost in extraction and are restored below; all other
; instructions are unchanged.
define i32 @fred(i32 %a0, ptr nocapture readonly %a1) local_unnamed_addr #0 {
b2:
  %v4 = getelementptr inbounds i32, ptr %a1, i32 1
  %v5 = load i32, ptr %a1, align 4
  %v6 = load i32, ptr %v4, align 4
  %v7 = zext i32 %a0 to i64
  br label %b8

b8:                                               ; preds = %b8, %b2
  %v9 = phi i32 [ %v6, %b2 ], [ %v49, %b8 ]
  %v10 = phi i32 [ %v5, %b2 ], [ %v48, %b8 ]
  %v11 = phi i32 [ 2, %b2 ], [ %v45, %b8 ]
  %v12 = phi i64 [ 0, %b2 ], [ %v46, %b8 ]
  %v13 = phi i64 [ 0, %b2 ], [ %v47, %b8 ]
  %v14 = phi i32 [ 0, %b2 ], [ %v33, %b8 ]
  %v15 = phi i32 [ 0, %b2 ], [ %v40, %b8 ]
  %v16 = zext i32 %v10 to i64
  %v17 = or i64 %v12, %v16
  %v18 = tail call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %v17, i64 %v7)
  %v19 = zext i32 %v9 to i64
  %v20 = or i64 %v13, %v19
  %v21 = tail call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %v20, i64 %v7)
  %v22 = getelementptr inbounds i32, ptr %a1, i32 %v11
  %v23 = load i32, ptr %v22, align 4
  ; NOTE(review): %v24 was missing from the damaged source. %v11 starts at 2
  ; and advances by 4, and the sibling indices are %v11+2 (%v41) and %v11+3
  ; (%v43), so %v11+1 is the consistent reconstruction. "or i32 %v11, 1"
  ; would be value-identical since %v11 is always even — confirm against the
  ; original test.
  %v24 = add nuw nsw i32 %v11, 1
  %v25 = getelementptr inbounds i32, ptr %a1, i32 %v24
  %v26 = load i32, ptr %v25, align 4
  %v27 = zext i32 %v14 to i64
  %v28 = shl nuw i64 %v27, 32
  %v29 = zext i32 %v23 to i64
  %v30 = or i64 %v28, %v29
  %v31 = tail call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %v30, i64 %v7)
  %v32 = lshr i64 %v31, 32
  %v33 = trunc i64 %v32 to i32
  %v34 = zext i32 %v15 to i64
  %v35 = shl nuw i64 %v34, 32
  %v36 = zext i32 %v26 to i64
  %v37 = or i64 %v35, %v36
  %v38 = tail call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %v37, i64 %v7)
  %v39 = lshr i64 %v38, 32
  %v40 = trunc i64 %v39 to i32
  %v41 = add nuw nsw i32 %v11, 2
  %v42 = getelementptr inbounds i32, ptr %a1, i32 %v41
  %v43 = add nuw nsw i32 %v11, 3
  %v44 = getelementptr inbounds i32, ptr %a1, i32 %v43
  %v45 = add nuw nsw i32 %v11, 4
  ; Keep only the high words of the accumulators (-4294967296 = 0xFFFFFFFF00000000).
  %v46 = and i64 %v18, -4294967296
  %v47 = and i64 %v21, -4294967296
  %v48 = load i32, ptr %v42, align 4
  %v49 = load i32, ptr %v44, align 4
  %v50 = icmp ult i32 %v45, 30
  br i1 %v50, label %b8, label %b51

b51:                                              ; preds = %b8
  %v52 = zext i32 %v48 to i64
  %v53 = or i64 %v46, %v52
  %v54 = add i64 %v53, %v7
  %v55 = lshr i64 %v54, 32
  %v56 = trunc i64 %v55 to i32
  %v57 = zext i32 %v49 to i64
  %v58 = or i64 %v47, %v57
  %v59 = add i64 %v58, %v7
  %v60 = lshr i64 %v59, 32
  %v61 = trunc i64 %v60 to i32
  ; Fold the four partial results down to one i32 via the high word of
  ; successive combinew pairs.
  %v62 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v33, i32 %v56)
  %v63 = lshr i64 %v62, 32
  %v64 = trunc i64 %v63 to i32
  %v65 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v40, i32 %v61)
  %v66 = lshr i64 %v65, 32
  %v67 = trunc i64 %v66 to i32
  %v68 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v67, i32 %v64)
  %v69 = lshr i64 %v68, 32
  %v70 = trunc i64 %v69 to i32
  ret i32 %v70
}
; Hexagon target intrinsics used by @fred.
declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64) #1
declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64) #1
declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1

attributes #0 = { nounwind readonly "target-cpu"="hexagonv60" }
attributes #1 = { nounwind readnone }