; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple aarch64 -mcpu=cortex-a57 -mattr=+slow-paired-128 < %s | FileCheck %s --check-prefixes=DEFAULT
; RUN: llc -mtriple aarch64 -mcpu=cortex-a57 -mattr=+slow-paired-128 -mattr=+ascend-store-address < %s | FileCheck %s --check-prefixes=ASCEND
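;
; Check the effect of the +ascend-store-address subtarget feature: with it
; enabled, the ASCEND prefix expects the eight q-register stores in the
; unrolled loop to be emitted in ascending address order ([x8, #-64] up to
; [x8, #48]), while the DEFAULT run leaves the scheduler's interleaved
; order alone. +slow-paired-128 keeps the 128-bit stores from being merged
; into stp q pairs, so the individual str/stur ordering stays observable.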
target triple = "aarch64-unknown-linux-gnu"

define dso_local void @memset_unroll2(ptr nocapture %array, i64 %size) {
; DEFAULT-LABEL: memset_unroll2:
; DEFAULT:       // %bb.0: // %entry
; DEFAULT-NEXT:    fmov v0.2d, #2.00000000
; DEFAULT-NEXT:    add x8, x0, #64
; DEFAULT-NEXT:    .p2align 4, , 8
; DEFAULT-NEXT:  .LBB0_1: // %vector.body
; DEFAULT-NEXT:    // =>This Inner Loop Header: Depth=1
; DEFAULT-NEXT:    stur q0, [x8, #-64]
; DEFAULT-NEXT:    subs x1, x1, #4
; DEFAULT-NEXT:    stur q0, [x8, #-48]
; DEFAULT-NEXT:    str q0, [x8]
; DEFAULT-NEXT:    str q0, [x8, #16]
; DEFAULT-NEXT:    str q0, [x8, #32]
; DEFAULT-NEXT:    str q0, [x8, #48]
; DEFAULT-NEXT:    stur q0, [x8, #-32]
; DEFAULT-NEXT:    stur q0, [x8, #-16]
; DEFAULT-NEXT:    add x8, x8, #128
; DEFAULT-NEXT:    b.ne .LBB0_1
; DEFAULT-NEXT:  // %bb.2: // %cleanup
; DEFAULT-NEXT:    ret
;
; ASCEND-LABEL: memset_unroll2:
; ASCEND:       // %bb.0: // %entry
; ASCEND-NEXT:    fmov v0.2d, #2.00000000
; ASCEND-NEXT:    add x8, x0, #64
; ASCEND-NEXT:    .p2align 4, , 8
; ASCEND-NEXT:  .LBB0_1: // %vector.body
; ASCEND-NEXT:    // =>This Inner Loop Header: Depth=1
; ASCEND-NEXT:    stur q0, [x8, #-64]
; ASCEND-NEXT:    subs x1, x1, #4
; ASCEND-NEXT:    stur q0, [x8, #-48]
; ASCEND-NEXT:    stur q0, [x8, #-32]
; ASCEND-NEXT:    stur q0, [x8, #-16]
; ASCEND-NEXT:    str q0, [x8]
; ASCEND-NEXT:    str q0, [x8, #16]
; ASCEND-NEXT:    str q0, [x8, #32]
; ASCEND-NEXT:    str q0, [x8, #48]
; ASCEND-NEXT:    add x8, x8, #128
; ASCEND-NEXT:    b.ne .LBB0_1
; ASCEND-NEXT:  // %bb.2: // %cleanup
; ASCEND-NEXT:    ret
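; The loop below is a hand-unrolled memset-style pattern: each iteration
; stores <2 x double> <2.0, 2.0> into sixteen consecutive doubles (eight
; 16-byte stores covering 128 bytes) and counts %niter down by 4, matching
; the "subs x1, x1, #4" / "add x8, x8, #128" pair in the checks above.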
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index16, %vector.body ]
  %niter = phi i64 [ %size, %entry ], [ %niter.nsub.3, %vector.body ]
  %array0 = getelementptr inbounds double, ptr %array, i64 %index
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array0, align 8
  %array2 = getelementptr inbounds double, ptr %array0, i64 2
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array2, align 8
  %index4 = or disjoint i64 %index, 4
  %array4 = getelementptr inbounds double, ptr %array, i64 %index4
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array4, align 8
  %array6 = getelementptr inbounds double, ptr %array4, i64 2
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array6, align 8
  %index8 = or disjoint i64 %index, 8
  %array8 = getelementptr inbounds double, ptr %array, i64 %index8
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array8, align 8
  %array10 = getelementptr inbounds double, ptr %array8, i64 2
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array10, align 8
  %index12 = or disjoint i64 %index, 12
  %array12 = getelementptr inbounds double, ptr %array, i64 %index12
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array12, align 8
  %array14 = getelementptr inbounds double, ptr %array12, i64 2
  store <2 x double> <double 2.000000e+00, double 2.000000e+00>, ptr %array14, align 8
  %index16 = add i64 %index, 16
  %niter.nsub.3 = add i64 %niter, -4
  %niter.ncmp.3 = icmp eq i64 %niter.nsub.3, 0
  br i1 %niter.ncmp.3, label %cleanup, label %vector.body

cleanup:                                          ; preds = %vector.body
  ret void
}