1 ; REQUIRES: to-be-fixed
2 ; This requires further patches.
3 ; RUN: llc -march=hexagon < %s | FileCheck %s
5 ; Split all andp/orp instructions (by boosting the profitability of their
6 ; operands, which happen to be word masks).
7 ; This should result in a loop with two packets, but we don't generate
8 ; post-incremented loads, so we end up with 3 packets.
11 ; CHECK: loop0([[LOOP:.LBB[0-9_]+]],
15 ; Make sure that the 3rd packet only has an add in it.
17 ; CHECK: r[[REG:[0-9]+]] = add(r[[REG]],#16)
19 ; CHECK: }{{[ \t]*}}:endloop0
21 target triple = "hexagon"
define i32 @fred(i32 %a0, i64* nocapture readonly %a1) local_unnamed_addr #0 {
b2:
  ; Reinterpret the i64 buffer as a stream of i32 words and preload the
  ; first two; %v7 is the 64-bit-widened scalar operand used every round.
  %v3 = bitcast i64* %a1 to i32*
  %v4 = getelementptr inbounds i32, i32* %v3, i32 1
  %v5 = load i32, i32* %v3, align 4
  %v6 = load i32, i32* %v4, align 4
  %v7 = zext i32 %a0 to i64
  br label %b8

b8:                                               ; preds = %b8, %b2
  %v9 = phi i32 [ %v6, %b2 ], [ %v49, %b8 ]
  %v10 = phi i32 [ %v5, %b2 ], [ %v48, %b8 ]
  %v11 = phi i32 [ 2, %b2 ], [ %v45, %b8 ]
  %v12 = phi i64 [ 0, %b2 ], [ %v46, %b8 ]
  %v13 = phi i64 [ 0, %b2 ], [ %v47, %b8 ]
  %v14 = phi i32 [ 0, %b2 ], [ %v33, %b8 ]
  %v15 = phi i32 [ 0, %b2 ], [ %v40, %b8 ]
  ; %v12/%v13 carry only high-word bits (masked with 0xFFFFFFFF00000000
  ; below), so OR-ing in the freshly loaded low word forms the next 64-bit
  ; operand without an add.
  %v16 = zext i32 %v10 to i64
  %v17 = or i64 %v12, %v16
  %v18 = tail call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %v17, i64 %v7)
  %v19 = zext i32 %v9 to i64
  %v20 = or i64 %v13, %v19
  %v21 = tail call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %v20, i64 %v7)
  %v22 = getelementptr inbounds i32, i32* %v3, i32 %v11
  %v23 = load i32, i32* %v22, align 4
  ; NOTE(review): the definition of %v24 was lost in extraction. %v11 is
  ; always even here (starts at 2, stepped by 4), and %v25 pairs with %v22,
  ; so %v24 must be the odd-word index v11+1 — reconstructed accordingly.
  %v24 = add nuw nsw i32 %v11, 1
  %v25 = getelementptr inbounds i32, i32* %v3, i32 %v24
  %v26 = load i32, i32* %v25, align 4
  ; Pack {previous high result : new low word} into an i64 for vxaddsubhr,
  ; then keep only the high word of the result.
  %v27 = zext i32 %v14 to i64
  %v28 = shl nuw i64 %v27, 32
  %v29 = zext i32 %v23 to i64
  %v30 = or i64 %v28, %v29
  %v31 = tail call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %v30, i64 %v7)
  %v32 = lshr i64 %v31, 32
  %v33 = trunc i64 %v32 to i32
  %v34 = zext i32 %v15 to i64
  %v35 = shl nuw i64 %v34, 32
  %v36 = zext i32 %v26 to i64
  %v37 = or i64 %v35, %v36
  %v38 = tail call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %v37, i64 %v7)
  %v39 = lshr i64 %v38, 32
  %v40 = trunc i64 %v39 to i32
  ; Next-iteration indices (v11+2, v11+3) and induction step (v11+4).
  %v41 = add nuw nsw i32 %v11, 2
  %v42 = getelementptr inbounds i32, i32* %v3, i32 %v41
  %v43 = add nuw nsw i32 %v11, 3
  %v44 = getelementptr inbounds i32, i32* %v3, i32 %v43
  %v45 = add nuw nsw i32 %v11, 4
  ; -4294967296 == 0xFFFFFFFF00000000: retain only the high words. These
  ; are the word masks the test wants to make profitable to split.
  %v46 = and i64 %v18, -4294967296
  %v47 = and i64 %v21, -4294967296
  %v48 = load i32, i32* %v42, align 4
  %v49 = load i32, i32* %v44, align 4
  %v50 = icmp ult i32 %v45, 30
  br i1 %v50, label %b8, label %b51

b51:                                              ; preds = %b8
  ; Epilogue: one plain-add round on the last loaded pair, then fold the
  ; four partial results into a single i32 via combinew high-word chains.
  %v52 = zext i32 %v48 to i64
  %v53 = or i64 %v46, %v52
  %v54 = add i64 %v53, %v7
  %v55 = lshr i64 %v54, 32
  %v56 = trunc i64 %v55 to i32
  %v57 = zext i32 %v49 to i64
  %v58 = or i64 %v47, %v57
  %v59 = add i64 %v58, %v7
  %v60 = lshr i64 %v59, 32
  %v61 = trunc i64 %v60 to i32
  %v62 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v33, i32 %v56)
  %v63 = lshr i64 %v62, 32
  %v64 = trunc i64 %v63 to i32
  %v65 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v40, i32 %v61)
  %v66 = lshr i64 %v65, 32
  %v67 = trunc i64 %v66 to i32
  %v68 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v67, i32 %v64)
  %v69 = lshr i64 %v68, 32
  %v70 = trunc i64 %v69 to i32
  ret i32 %v70
}
; Hexagon target intrinsics used by @fred (declarations only; lowered by the
; backend). S4.vxsubaddhr / S4.vxaddsubhr map to the vector halfword
; cross sub-add / add-sub (rounding) instructions; A2.combinew packs two
; i32 words into an i64 register pair.
declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64) #1
declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64) #1
declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1
; #0: applied to @fred — readonly matches the function (loads only, no
; stores); target-cpu pins codegen to HVX-era hexagonv60 for stable checks.
attributes #0 = { nounwind readonly "target-cpu"="hexagonv60" }
; #1: applied to the intrinsic declarations (pure, no memory access).
attributes #1 = { nounwind readnone }