; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve -force-streaming-compatible-sve < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
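
; Store the even lanes of a <4 x i8> load from an alloca as a <2 x i8>.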
define void @alloc_v4i8(ptr %st_ptr) nounwind {
; CHECK-LABEL: alloc_v4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #48
; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT:    mov x19, x0
; CHECK-NEXT:    add x0, sp, #28
; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT:    add x20, sp, #28
; CHECK-NEXT:    bl def
; CHECK-NEXT:    ptrue p0.b, vl2
; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x20]
; CHECK-NEXT:    ptrue p0.s, vl2
; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT:    mov z2.b, z0.b[1]
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    fmov w9, s2
; CHECK-NEXT:    stp w8, w9, [sp, #8]
; CHECK-NEXT:    ldr d0, [sp, #8]
; CHECK-NEXT:    st1b { z0.s }, p0, [x19]
; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #48
; CHECK-NEXT:    ret
  %alloc = alloca [4 x i8]
  call void @def(ptr %alloc)
  %load = load <4 x i8>, ptr %alloc
  %strided.vec = shufflevector <4 x i8> %load, <4 x i8> poison, <2 x i32> <i32 0, i32 2>
  store <2 x i8> %strided.vec, ptr %st_ptr
  ret void
}
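
; Store the odd lanes of a <6 x i8> load from an alloca as a <3 x i8>.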
define void @alloc_v6i8(ptr %st_ptr) nounwind {
; CHECK-LABEL: alloc_v6i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #48
; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT:    mov x19, x0
; CHECK-NEXT:    add x0, sp, #24
; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT:    add x20, sp, #24
; CHECK-NEXT:    bl def
; CHECK-NEXT:    ptrue p0.b, vl3
; CHECK-NEXT:    ld2b { z0.b, z1.b }, p0/z, [x20]
; CHECK-NEXT:    ptrue p0.h, vl4
; CHECK-NEXT:    mov z2.b, z1.b[3]
; CHECK-NEXT:    fmov w8, s1
; CHECK-NEXT:    mov z3.b, z1.b[2]
; CHECK-NEXT:    mov z4.b, z1.b[1]
; CHECK-NEXT:    strh w8, [sp]
; CHECK-NEXT:    fmov w8, s2
; CHECK-NEXT:    fmov w9, s3
; CHECK-NEXT:    strh w8, [sp, #6]
; CHECK-NEXT:    fmov w8, s4
; CHECK-NEXT:    strh w9, [sp, #4]
; CHECK-NEXT:    strh w8, [sp, #2]
; CHECK-NEXT:    add x8, sp, #12
; CHECK-NEXT:    ldr d0, [sp]
; CHECK-NEXT:    st1b { z0.h }, p0, [x8]
; CHECK-NEXT:    ldrh w8, [sp, #12]
; CHECK-NEXT:    strb w9, [x19, #2]
; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT:    strh w8, [x19]
; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #48
; CHECK-NEXT:    ret
  %alloc = alloca [6 x i8]
  call void @def(ptr %alloc)
  %load = load <6 x i8>, ptr %alloc
  %strided.vec = shufflevector <6 x i8> %load, <6 x i8> poison, <3 x i32> <i32 1, i32 3, i32 5>
  store <3 x i8> %strided.vec, ptr %st_ptr
  ret void
}
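
; Store the first nine even lanes of a <32 x i8> load from an alloca as a <9 x i8>.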
define void @alloc_v32i8(ptr %st_ptr) nounwind {
; CHECK-LABEL: alloc_v32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #64
; CHECK-NEXT:    stp x30, x19, [sp, #48] // 16-byte Folded Spill
; CHECK-NEXT:    mov x19, x0
; CHECK-NEXT:    add x0, sp, #16
; CHECK-NEXT:    bl def
; CHECK-NEXT:    ldp q0, q3, [sp, #16]
; CHECK-NEXT:    mov z1.b, z0.b[14]
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    mov z2.b, z0.b[12]
; CHECK-NEXT:    mov z4.b, z0.b[10]
; CHECK-NEXT:    mov z5.b, z0.b[8]
; CHECK-NEXT:    strb w8, [sp]
; CHECK-NEXT:    fmov w8, s1
; CHECK-NEXT:    mov z1.b, z0.b[6]
; CHECK-NEXT:    strb w8, [sp, #7]
; CHECK-NEXT:    fmov w8, s2
; CHECK-NEXT:    mov z2.b, z0.b[4]
; CHECK-NEXT:    mov z0.b, z0.b[2]
; CHECK-NEXT:    strb w8, [sp, #6]
; CHECK-NEXT:    fmov w8, s4
; CHECK-NEXT:    strb w8, [sp, #5]
; CHECK-NEXT:    fmov w8, s5
; CHECK-NEXT:    strb w8, [sp, #4]
; CHECK-NEXT:    fmov w8, s1
; CHECK-NEXT:    strb w8, [sp, #3]
; CHECK-NEXT:    fmov w8, s2
; CHECK-NEXT:    strb w8, [sp, #2]
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    strb w8, [sp, #1]
; CHECK-NEXT:    fmov w8, s3
; CHECK-NEXT:    strb w8, [x19, #8]
; CHECK-NEXT:    ldr q0, [sp]
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    str x8, [x19]
; CHECK-NEXT:    ldp x30, x19, [sp, #48] // 16-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #64
; CHECK-NEXT:    ret
  %alloc = alloca [32 x i8]
  call void @def(ptr %alloc)
  %load = load <32 x i8>, ptr %alloc
  %strided.vec = shufflevector <32 x i8> %load, <32 x i8> poison, <9 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16>
  store <9 x i8> %strided.vec, ptr %st_ptr
  ret void
}
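
; Store the even lanes of an <8 x double> load from an alloca as a <4 x double>.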
define void @alloc_v8f64(ptr %st_ptr) nounwind {
; CHECK-LABEL: alloc_v8f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #96
; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT:    mov x19, x0
; CHECK-NEXT:    mov x0, sp
; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
; CHECK-NEXT:    mov x20, sp
; CHECK-NEXT:    bl def
; CHECK-NEXT:    ptrue p0.d, vl2
; CHECK-NEXT:    mov x8, #4 // =0x4
; CHECK-NEXT:    ld2d { z0.d, z1.d }, p0/z, [x20]
; CHECK-NEXT:    ld2d { z2.d, z3.d }, p0/z, [x20, x8, lsl #3]
; CHECK-NEXT:    ldr x30, [sp, #64] // 8-byte Folded Reload
; CHECK-NEXT:    stp q0, q2, [x19]
; CHECK-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #96
; CHECK-NEXT:    ret
  %alloc = alloca [8 x double]
  call void @def(ptr %alloc)
  %load = load <8 x double>, ptr %alloc
  %strided.vec = shufflevector <8 x double> %load, <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  store <4 x double> %strided.vec, ptr %st_ptr
  ret void
}

declare void @def(ptr)