; REQUIRES: asserts
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=exynos-m3 -verify-misched -debug-only=machine-scheduler -o - 2>&1 > /dev/null | FileCheck %s
; Test ldr clustering: the two adjacent 32-bit loads should be paired by the
; scheduler's load/store clustering mutation.
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
; CHECK: SU(1): %{{[0-9]+}}:gpr32 = LDRWui
; CHECK: SU(2): %{{[0-9]+}}:gpr32 = LDRWui
define i32 @ldr_int(ptr %a) nounwind {
  %p1 = getelementptr inbounds i32, ptr %a, i32 1
  %tmp1 = load i32, ptr %p1, align 2
  %p2 = getelementptr inbounds i32, ptr %a, i32 2
  %tmp2 = load i32, ptr %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2
  ret i32 %tmp3
}
; Test ldpsw clustering: two sign-extending 32-bit loads from consecutive
; addresses should cluster into an LDPSW-friendly pair.
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_sext_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
; CHECK: SU(1): %{{[0-9]+}}:gpr64 = LDRSWui
; CHECK: SU(2): %{{[0-9]+}}:gpr64 = LDRSWui
define i64 @ldp_sext_int(ptr %p) nounwind {
  %tmp = load i32, ptr %p, align 4
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
  %tmp1 = load i32, ptr %add.ptr, align 4
  %sexttmp = sext i32 %tmp to i64
  %sexttmp1 = sext i32 %tmp1 to i64
  %add = add nsw i64 %sexttmp1, %sexttmp
  ret i64 %add
}
; Test ldur clustering: unscaled (negative-offset) loads should also be
; clustered by the scheduler.
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldur_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
; CHECK: SU(1): %{{[0-9]+}}:gpr32 = LDURWi
; CHECK: SU(2): %{{[0-9]+}}:gpr32 = LDURWi
define i32 @ldur_int(ptr %a) nounwind {
  %p1 = getelementptr inbounds i32, ptr %a, i32 -1
  %tmp1 = load i32, ptr %p1, align 2
  %p2 = getelementptr inbounds i32, ptr %a, i32 -2
  %tmp2 = load i32, ptr %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2
  ret i32 %tmp3
}
; Test sext + zext clustering: mixed-extension adjacent loads should still be
; recognized as a clusterable pair.
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0
; CHECK: Cluster ld/st SU(3) - SU(4)
; CHECK: SU(3): %{{[0-9]+}}:gpr64 = LDRSWui
; CHECK: SU(4): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
define i64 @ldp_half_sext_zext_int(ptr %q, ptr %p) nounwind {
  %tmp0 = load i64, ptr %q, align 4
  %tmp = load i32, ptr %p, align 4
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
  %tmp1 = load i32, ptr %add.ptr, align 4
  %sexttmp = sext i32 %tmp to i64
  %sexttmp1 = zext i32 %tmp1 to i64
  %add = add nsw i64 %sexttmp1, %sexttmp
  %add1 = add nsw i64 %add, %tmp0
  ret i64 %add1
}
; Test zext + sext clustering: same as above with the extension kinds swapped.
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_zext_sext_int:%bb.0
; CHECK: Cluster ld/st SU(3) - SU(4)
; CHECK: SU(3): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
; CHECK: SU(4): %{{[0-9]+}}:gpr64 = LDRSWui
define i64 @ldp_half_zext_sext_int(ptr %q, ptr %p) nounwind {
  %tmp0 = load i64, ptr %q, align 4
  %tmp = load i32, ptr %p, align 4
  %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
  %tmp1 = load i32, ptr %add.ptr, align 4
  %sexttmp = zext i32 %tmp to i64
  %sexttmp1 = sext i32 %tmp1 to i64
  %add = add nsw i64 %sexttmp1, %sexttmp
  %add1 = add nsw i64 %add, %tmp0
  ret i64 %add1
}
; Verify we don't cluster volatile loads: volatility must suppress the
; clustering edge between the two loads.
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int_volatile:%bb.0
; CHECK-NOT: Cluster ld/st
; CHECK: SU(1): %{{[0-9]+}}:gpr32 = LDRWui
; CHECK: SU(2): %{{[0-9]+}}:gpr32 = LDRWui
define i32 @ldr_int_volatile(ptr %a) nounwind {
  %p1 = getelementptr inbounds i32, ptr %a, i32 1
  %tmp1 = load volatile i32, ptr %p1, align 2
  %p2 = getelementptr inbounds i32, ptr %a, i32 2
  %tmp2 = load volatile i32, ptr %p2, align 2
  %tmp3 = add i32 %tmp1, %tmp2
  ret i32 %tmp3
}
; Test ldq clustering (no clustering for Exynos): 128-bit loads separated by
; an independent instruction should still cluster as SU(1)/SU(3).
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldq_cluster:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(3)
; CHECK: SU(1): %{{[0-9]+}}:fpr128 = LDRQui
; CHECK: SU(3): %{{[0-9]+}}:fpr128 = LDRQui
define <2 x i64> @ldq_cluster(ptr %p) {
  %tmp1 = load <2 x i64>, ptr %p, align 8
  %add.ptr2 = getelementptr inbounds i64, ptr %p, i64 2
  %tmp2 = add nsw <2 x i64> %tmp1, %tmp1
  %tmp3 = load <2 x i64>, ptr %add.ptr2, align 8
  %res = mul nsw <2 x i64> %tmp2, %tmp3
  ret <2 x i64> %res
}