1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
3 ; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32ZDINX %s
4 ; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+unaligned-scalar-mem -verify-machineinstrs < %s \
5 ; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32ZDINXUALIGNED %s
6 ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
7 ; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64ZDINX %s
; Store %d at p+2044. On RV32 the f64 is a GPR pair, so the store splits into
; two sw at offsets 2044/2048; 2048 is outside simm12 range, hence the rebased
; addi a0, a0, 2047 with -3/1 displacements. RV64 uses a single sd.
9 define void @foo(ptr nocapture %p, double %d) nounwind {
10 ; RV32ZDINX-LABEL: foo:
11 ; RV32ZDINX: # %bb.0: # %entry
12 ; RV32ZDINX-NEXT: mv a3, a2
13 ; RV32ZDINX-NEXT: addi a0, a0, 2047
14 ; RV32ZDINX-NEXT: mv a2, a1
15 ; RV32ZDINX-NEXT: sw a2, -3(a0)
16 ; RV32ZDINX-NEXT: sw a3, 1(a0)
19 ; RV32ZDINXUALIGNED-LABEL: foo:
20 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
21 ; RV32ZDINXUALIGNED-NEXT: mv a3, a2
22 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, 2047
23 ; RV32ZDINXUALIGNED-NEXT: mv a2, a1
24 ; RV32ZDINXUALIGNED-NEXT: sw a2, -3(a0)
25 ; RV32ZDINXUALIGNED-NEXT: sw a3, 1(a0)
26 ; RV32ZDINXUALIGNED-NEXT: ret
28 ; RV64ZDINX-LABEL: foo:
29 ; RV64ZDINX: # %bb.0: # %entry
30 ; RV64ZDINX-NEXT: sd a1, 2044(a0)
33 %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044
34 store double %d, ptr %add.ptr, align 8
; Same boundary store as @foo, but the value comes from fadd.d: on RV32 the
; incoming halves are shuffled into the a2/a3 pair (mv) before the pair-wise
; fadd.d and the split sw/sw at the rebased -3/1 offsets.
38 define void @foo2(ptr nocapture %p, double %d) nounwind {
39 ; RV32ZDINX-LABEL: foo2:
40 ; RV32ZDINX: # %bb.0: # %entry
41 ; RV32ZDINX-NEXT: mv a3, a2
42 ; RV32ZDINX-NEXT: addi a0, a0, 2047
43 ; RV32ZDINX-NEXT: mv a2, a1
44 ; RV32ZDINX-NEXT: fadd.d a2, a2, a2
45 ; RV32ZDINX-NEXT: sw a2, -3(a0)
46 ; RV32ZDINX-NEXT: sw a3, 1(a0)
49 ; RV32ZDINXUALIGNED-LABEL: foo2:
50 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
51 ; RV32ZDINXUALIGNED-NEXT: mv a3, a2
52 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, 2047
53 ; RV32ZDINXUALIGNED-NEXT: mv a2, a1
54 ; RV32ZDINXUALIGNED-NEXT: fadd.d a2, a2, a2
55 ; RV32ZDINXUALIGNED-NEXT: sw a2, -3(a0)
56 ; RV32ZDINXUALIGNED-NEXT: sw a3, 1(a0)
57 ; RV32ZDINXUALIGNED-NEXT: ret
59 ; RV64ZDINX-LABEL: foo2:
60 ; RV64ZDINX: # %bb.0: # %entry
61 ; RV64ZDINX-NEXT: fadd.d a1, a1, a1
62 ; RV64ZDINX-NEXT: sd a1, 2044(a0)
65 %a = fadd double %d, %d
66 %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044
67 store double %a, ptr %add.ptr, align 8
; Naturally aligned (8-byte) double global used by the aligned global tests.
71 @d = global double 4.2, align 8
; Load the aligned global @d (lw pair via %lo(d)/%lo(d+4) on RV32, single ld
; on RV64) and store it at p+2044 with the rebased boundary addressing.
73 define void @foo3(ptr nocapture %p) nounwind {
74 ; RV32ZDINX-LABEL: foo3:
75 ; RV32ZDINX: # %bb.0: # %entry
76 ; RV32ZDINX-NEXT: lui a1, %hi(d)
77 ; RV32ZDINX-NEXT: lw a2, %lo(d)(a1)
78 ; RV32ZDINX-NEXT: lw a3, %lo(d+4)(a1)
79 ; RV32ZDINX-NEXT: addi a0, a0, 2047
80 ; RV32ZDINX-NEXT: sw a2, -3(a0)
81 ; RV32ZDINX-NEXT: sw a3, 1(a0)
84 ; RV32ZDINXUALIGNED-LABEL: foo3:
85 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
86 ; RV32ZDINXUALIGNED-NEXT: lui a1, %hi(d)
87 ; RV32ZDINXUALIGNED-NEXT: lw a2, %lo(d)(a1)
88 ; RV32ZDINXUALIGNED-NEXT: lw a3, %lo(d+4)(a1)
89 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, 2047
90 ; RV32ZDINXUALIGNED-NEXT: sw a2, -3(a0)
91 ; RV32ZDINXUALIGNED-NEXT: sw a3, 1(a0)
92 ; RV32ZDINXUALIGNED-NEXT: ret
94 ; RV64ZDINX-LABEL: foo3:
95 ; RV64ZDINX: # %bb.0: # %entry
96 ; RV64ZDINX-NEXT: lui a1, %hi(d)
97 ; RV64ZDINX-NEXT: ld a1, %lo(d)(a1)
98 ; RV64ZDINX-NEXT: sd a1, 2044(a0)
101 %0 = load double, ptr @d, align 8
102 %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044
103 store double %0, ptr %add.ptr, align 8
; Reverse direction: load the double from p+2044 and store it to @d. The
; pointer round-trips through an alloca (the sw/sd a0, 8(sp) spill of %p
; comes from the un-promoted %p.addr slot).
107 define void @foo4(ptr %p) nounwind {
108 ; RV32ZDINX-LABEL: foo4:
109 ; RV32ZDINX: # %bb.0: # %entry
110 ; RV32ZDINX-NEXT: addi sp, sp, -16
111 ; RV32ZDINX-NEXT: addi a1, a0, 2047
112 ; RV32ZDINX-NEXT: lw a2, -3(a1)
113 ; RV32ZDINX-NEXT: lw a3, 1(a1)
114 ; RV32ZDINX-NEXT: sw a0, 8(sp)
115 ; RV32ZDINX-NEXT: lui a0, %hi(d)
116 ; RV32ZDINX-NEXT: sw a2, %lo(d)(a0)
117 ; RV32ZDINX-NEXT: sw a3, %lo(d+4)(a0)
118 ; RV32ZDINX-NEXT: addi sp, sp, 16
119 ; RV32ZDINX-NEXT: ret
121 ; RV32ZDINXUALIGNED-LABEL: foo4:
122 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
123 ; RV32ZDINXUALIGNED-NEXT: addi sp, sp, -16
124 ; RV32ZDINXUALIGNED-NEXT: addi a1, a0, 2047
125 ; RV32ZDINXUALIGNED-NEXT: lw a2, -3(a1)
126 ; RV32ZDINXUALIGNED-NEXT: lw a3, 1(a1)
127 ; RV32ZDINXUALIGNED-NEXT: sw a0, 8(sp)
128 ; RV32ZDINXUALIGNED-NEXT: lui a0, %hi(d)
129 ; RV32ZDINXUALIGNED-NEXT: sw a2, %lo(d)(a0)
130 ; RV32ZDINXUALIGNED-NEXT: sw a3, %lo(d+4)(a0)
131 ; RV32ZDINXUALIGNED-NEXT: addi sp, sp, 16
132 ; RV32ZDINXUALIGNED-NEXT: ret
134 ; RV64ZDINX-LABEL: foo4:
135 ; RV64ZDINX: # %bb.0: # %entry
136 ; RV64ZDINX-NEXT: addi sp, sp, -16
137 ; RV64ZDINX-NEXT: ld a1, 2044(a0)
138 ; RV64ZDINX-NEXT: sd a0, 8(sp)
139 ; RV64ZDINX-NEXT: lui a0, %hi(d)
140 ; RV64ZDINX-NEXT: sd a1, %lo(d)(a0)
141 ; RV64ZDINX-NEXT: addi sp, sp, 16
142 ; RV64ZDINX-NEXT: ret
144 %p.addr = alloca ptr, align 8
145 store ptr %p, ptr %p.addr, align 8
146 %0 = load ptr, ptr %p.addr, align 8
147 %add.ptr = getelementptr inbounds i8, ptr %0, i64 2044
148 %1 = load double, ptr %add.ptr, align 8
149 store double %1, ptr @d, align 8
; Negative boundary: store at p-2049. The pair offsets -2049/-2045 are rebased
; with addi a0, a0, -2048 and reached at displacements -1/3.
153 define void @foo5(ptr nocapture %p, double %d) nounwind {
154 ; RV32ZDINX-LABEL: foo5:
155 ; RV32ZDINX: # %bb.0: # %entry
156 ; RV32ZDINX-NEXT: mv a3, a2
157 ; RV32ZDINX-NEXT: addi a0, a0, -2048
158 ; RV32ZDINX-NEXT: mv a2, a1
159 ; RV32ZDINX-NEXT: sw a2, -1(a0)
160 ; RV32ZDINX-NEXT: sw a3, 3(a0)
161 ; RV32ZDINX-NEXT: ret
163 ; RV32ZDINXUALIGNED-LABEL: foo5:
164 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
165 ; RV32ZDINXUALIGNED-NEXT: mv a3, a2
166 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, -2048
167 ; RV32ZDINXUALIGNED-NEXT: mv a2, a1
168 ; RV32ZDINXUALIGNED-NEXT: sw a2, -1(a0)
169 ; RV32ZDINXUALIGNED-NEXT: sw a3, 3(a0)
170 ; RV32ZDINXUALIGNED-NEXT: ret
172 ; RV64ZDINX-LABEL: foo5:
173 ; RV64ZDINX: # %bb.0: # %entry
174 ; RV64ZDINX-NEXT: addi a0, a0, -2048
175 ; RV64ZDINX-NEXT: sd a1, -1(a0)
176 ; RV64ZDINX-NEXT: ret
178 %add.ptr = getelementptr inbounds i8, ptr %p, i64 -2049
179 store double %d, ptr %add.ptr, align 8
; Boundary store where one fadd.d operand is the constant 3.14 materialized
; from the constant pool (.LCPI5_0); on RV32 it is loaded as the a4/a5 word
; pair before the pair-wise fadd.d and split store.
183 define void @foo6(ptr %p, double %d) nounwind {
184 ; RV32ZDINX-LABEL: foo6:
185 ; RV32ZDINX: # %bb.0: # %entry
186 ; RV32ZDINX-NEXT: mv a3, a2
187 ; RV32ZDINX-NEXT: lui a2, %hi(.LCPI5_0)
188 ; RV32ZDINX-NEXT: lw a4, %lo(.LCPI5_0)(a2)
189 ; RV32ZDINX-NEXT: lw a5, %lo(.LCPI5_0+4)(a2)
190 ; RV32ZDINX-NEXT: mv a2, a1
191 ; RV32ZDINX-NEXT: fadd.d a2, a2, a4
192 ; RV32ZDINX-NEXT: addi a0, a0, 2047
193 ; RV32ZDINX-NEXT: sw a2, -3(a0)
194 ; RV32ZDINX-NEXT: sw a3, 1(a0)
195 ; RV32ZDINX-NEXT: ret
197 ; RV32ZDINXUALIGNED-LABEL: foo6:
198 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
199 ; RV32ZDINXUALIGNED-NEXT: mv a3, a2
200 ; RV32ZDINXUALIGNED-NEXT: lui a2, %hi(.LCPI5_0)
201 ; RV32ZDINXUALIGNED-NEXT: lw a4, %lo(.LCPI5_0)(a2)
202 ; RV32ZDINXUALIGNED-NEXT: lw a5, %lo(.LCPI5_0+4)(a2)
203 ; RV32ZDINXUALIGNED-NEXT: mv a2, a1
204 ; RV32ZDINXUALIGNED-NEXT: fadd.d a2, a2, a4
205 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, 2047
206 ; RV32ZDINXUALIGNED-NEXT: sw a2, -3(a0)
207 ; RV32ZDINXUALIGNED-NEXT: sw a3, 1(a0)
208 ; RV32ZDINXUALIGNED-NEXT: ret
210 ; RV64ZDINX-LABEL: foo6:
211 ; RV64ZDINX: # %bb.0: # %entry
212 ; RV64ZDINX-NEXT: lui a2, %hi(.LCPI5_0)
213 ; RV64ZDINX-NEXT: ld a2, %lo(.LCPI5_0)(a2)
214 ; RV64ZDINX-NEXT: fadd.d a1, a1, a2
215 ; RV64ZDINX-NEXT: sd a1, 2044(a0)
216 ; RV64ZDINX-NEXT: ret
218 %add = fadd double %d, 3.140000e+00
219 %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044
220 store double %add, ptr %add.ptr, align 8
; Under-aligned source: load the double at @d+4 (align 4) and store at p+2044.
; Without +unaligned-scalar-mem the RV32 lowering bounces the words through a
; stack temporary; the UALIGNED run does direct word loads, and RV64 rebuilds
; the 64-bit value from two lwu plus slli/or.
224 define void @foo7(ptr nocapture %p) nounwind {
225 ; RV32ZDINX-LABEL: foo7:
226 ; RV32ZDINX: # %bb.0: # %entry
227 ; RV32ZDINX-NEXT: addi sp, sp, -16
228 ; RV32ZDINX-NEXT: lui a1, %hi(d)
229 ; RV32ZDINX-NEXT: lw a2, %lo(d+4)(a1)
230 ; RV32ZDINX-NEXT: addi a1, a1, %lo(d)
231 ; RV32ZDINX-NEXT: sw a2, 8(sp)
232 ; RV32ZDINX-NEXT: lw a1, 8(a1)
233 ; RV32ZDINX-NEXT: sw a1, 12(sp)
234 ; RV32ZDINX-NEXT: lw a2, 8(sp)
235 ; RV32ZDINX-NEXT: lw a3, 12(sp)
236 ; RV32ZDINX-NEXT: addi a0, a0, 2047
237 ; RV32ZDINX-NEXT: sw a2, -3(a0)
238 ; RV32ZDINX-NEXT: sw a3, 1(a0)
239 ; RV32ZDINX-NEXT: addi sp, sp, 16
240 ; RV32ZDINX-NEXT: ret
242 ; RV32ZDINXUALIGNED-LABEL: foo7:
243 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
244 ; RV32ZDINXUALIGNED-NEXT: lui a1, %hi(d)
245 ; RV32ZDINXUALIGNED-NEXT: addi a1, a1, %lo(d)
246 ; RV32ZDINXUALIGNED-NEXT: lw a2, 4(a1)
247 ; RV32ZDINXUALIGNED-NEXT: lw a3, 8(a1)
248 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, 2047
249 ; RV32ZDINXUALIGNED-NEXT: sw a2, -3(a0)
250 ; RV32ZDINXUALIGNED-NEXT: sw a3, 1(a0)
251 ; RV32ZDINXUALIGNED-NEXT: ret
253 ; RV64ZDINX-LABEL: foo7:
254 ; RV64ZDINX: # %bb.0: # %entry
255 ; RV64ZDINX-NEXT: lui a1, %hi(d)
256 ; RV64ZDINX-NEXT: addi a2, a1, %lo(d)
257 ; RV64ZDINX-NEXT: lwu a2, 8(a2)
258 ; RV64ZDINX-NEXT: lwu a1, %lo(d+4)(a1)
259 ; RV64ZDINX-NEXT: slli a2, a2, 32
260 ; RV64ZDINX-NEXT: or a1, a2, a1
261 ; RV64ZDINX-NEXT: sd a1, 2044(a0)
262 ; RV64ZDINX-NEXT: ret
264 %p2 = getelementptr inbounds i8, ptr @d, i32 4
265 %0 = load double, ptr %p2, align 4
266 %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044
267 store double %0, ptr %add.ptr, align 8
; Mirror of foo7: load from p+2044 (through an alloca'd pointer) and store the
; double under-aligned at @d+4 (align 4). Base RV32 spills the pair to the
; stack before the word stores; UALIGNED stores the words directly; RV64
; splits the sd into two sw plus an srli.
271 define void @foo8(ptr %p) nounwind {
272 ; RV32ZDINX-LABEL: foo8:
273 ; RV32ZDINX: # %bb.0: # %entry
274 ; RV32ZDINX-NEXT: addi sp, sp, -16
275 ; RV32ZDINX-NEXT: addi a1, a0, 2047
276 ; RV32ZDINX-NEXT: lw a2, -3(a1)
277 ; RV32ZDINX-NEXT: lw a3, 1(a1)
278 ; RV32ZDINX-NEXT: sw a0, 8(sp)
279 ; RV32ZDINX-NEXT: sw a2, 0(sp)
280 ; RV32ZDINX-NEXT: sw a3, 4(sp)
281 ; RV32ZDINX-NEXT: lw a0, 4(sp)
282 ; RV32ZDINX-NEXT: lui a1, %hi(d)
283 ; RV32ZDINX-NEXT: addi a2, a1, %lo(d)
284 ; RV32ZDINX-NEXT: sw a0, 8(a2)
285 ; RV32ZDINX-NEXT: lw a0, 0(sp)
286 ; RV32ZDINX-NEXT: sw a0, %lo(d+4)(a1)
287 ; RV32ZDINX-NEXT: addi sp, sp, 16
288 ; RV32ZDINX-NEXT: ret
290 ; RV32ZDINXUALIGNED-LABEL: foo8:
291 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
292 ; RV32ZDINXUALIGNED-NEXT: addi sp, sp, -16
293 ; RV32ZDINXUALIGNED-NEXT: addi a1, a0, 2047
294 ; RV32ZDINXUALIGNED-NEXT: lw a2, -3(a1)
295 ; RV32ZDINXUALIGNED-NEXT: lw a3, 1(a1)
296 ; RV32ZDINXUALIGNED-NEXT: sw a0, 8(sp)
297 ; RV32ZDINXUALIGNED-NEXT: lui a0, %hi(d)
298 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, %lo(d)
299 ; RV32ZDINXUALIGNED-NEXT: sw a2, 4(a0)
300 ; RV32ZDINXUALIGNED-NEXT: sw a3, 8(a0)
301 ; RV32ZDINXUALIGNED-NEXT: addi sp, sp, 16
302 ; RV32ZDINXUALIGNED-NEXT: ret
304 ; RV64ZDINX-LABEL: foo8:
305 ; RV64ZDINX: # %bb.0: # %entry
306 ; RV64ZDINX-NEXT: addi sp, sp, -16
307 ; RV64ZDINX-NEXT: ld a1, 2044(a0)
308 ; RV64ZDINX-NEXT: sd a0, 8(sp)
309 ; RV64ZDINX-NEXT: lui a0, %hi(d)
310 ; RV64ZDINX-NEXT: addi a2, a0, %lo(d)
311 ; RV64ZDINX-NEXT: sw a1, %lo(d+4)(a0)
312 ; RV64ZDINX-NEXT: srli a1, a1, 32
313 ; RV64ZDINX-NEXT: sw a1, 8(a2)
314 ; RV64ZDINX-NEXT: addi sp, sp, 16
315 ; RV64ZDINX-NEXT: ret
317 %p.addr = alloca ptr, align 8
318 store ptr %p, ptr %p.addr, align 8
319 %0 = load ptr, ptr %p.addr, align 8
320 %add.ptr = getelementptr inbounds i8, ptr %0, i64 2044
321 %1 = load double, ptr %add.ptr, align 8
322 %p2 = getelementptr inbounds i8, ptr @d, i32 4
323 store double %1, ptr %p2, align 4
; 4-byte-aligned double global (under natural 8-byte alignment) for the
; misaligned whole-global tests below.
327 @e = global double 4.2, align 4
; Load the whole under-aligned global @e (align 4) and store at p+2044.
; Base RV32 again routes the words through a stack slot; UALIGNED loads
; directly; RV64 merges two lwu with slli/or.
329 define void @foo9(ptr nocapture %p) nounwind {
330 ; RV32ZDINX-LABEL: foo9:
331 ; RV32ZDINX: # %bb.0: # %entry
332 ; RV32ZDINX-NEXT: addi sp, sp, -16
333 ; RV32ZDINX-NEXT: lui a1, %hi(e)
334 ; RV32ZDINX-NEXT: lw a2, %lo(e)(a1)
335 ; RV32ZDINX-NEXT: sw a2, 8(sp)
336 ; RV32ZDINX-NEXT: addi a1, a1, %lo(e)
337 ; RV32ZDINX-NEXT: lw a1, 4(a1)
338 ; RV32ZDINX-NEXT: sw a1, 12(sp)
339 ; RV32ZDINX-NEXT: lw a2, 8(sp)
340 ; RV32ZDINX-NEXT: lw a3, 12(sp)
341 ; RV32ZDINX-NEXT: addi a0, a0, 2047
342 ; RV32ZDINX-NEXT: sw a2, -3(a0)
343 ; RV32ZDINX-NEXT: sw a3, 1(a0)
344 ; RV32ZDINX-NEXT: addi sp, sp, 16
345 ; RV32ZDINX-NEXT: ret
347 ; RV32ZDINXUALIGNED-LABEL: foo9:
348 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
349 ; RV32ZDINXUALIGNED-NEXT: lui a1, %hi(e)
350 ; RV32ZDINXUALIGNED-NEXT: addi a1, a1, %lo(e)
351 ; RV32ZDINXUALIGNED-NEXT: lw a2, 0(a1)
352 ; RV32ZDINXUALIGNED-NEXT: lw a3, 4(a1)
353 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, 2047
354 ; RV32ZDINXUALIGNED-NEXT: sw a2, -3(a0)
355 ; RV32ZDINXUALIGNED-NEXT: sw a3, 1(a0)
356 ; RV32ZDINXUALIGNED-NEXT: ret
358 ; RV64ZDINX-LABEL: foo9:
359 ; RV64ZDINX: # %bb.0: # %entry
360 ; RV64ZDINX-NEXT: lui a1, %hi(e)
361 ; RV64ZDINX-NEXT: addi a2, a1, %lo(e)
362 ; RV64ZDINX-NEXT: lwu a2, 4(a2)
363 ; RV64ZDINX-NEXT: lwu a1, %lo(e)(a1)
364 ; RV64ZDINX-NEXT: slli a2, a2, 32
365 ; RV64ZDINX-NEXT: or a1, a2, a1
366 ; RV64ZDINX-NEXT: sd a1, 2044(a0)
367 ; RV64ZDINX-NEXT: ret
369 %0 = load double, ptr @e, align 4
370 %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044
371 store double %0, ptr %add.ptr, align 8
; Mirror of foo9: load from p+2044 (via alloca'd pointer) and store the
; double to the under-aligned global @e (align 4).
375 define void @foo10(ptr %p) nounwind {
376 ; RV32ZDINX-LABEL: foo10:
377 ; RV32ZDINX: # %bb.0: # %entry
378 ; RV32ZDINX-NEXT: addi sp, sp, -16
379 ; RV32ZDINX-NEXT: addi a1, a0, 2047
380 ; RV32ZDINX-NEXT: lw a2, -3(a1)
381 ; RV32ZDINX-NEXT: lw a3, 1(a1)
382 ; RV32ZDINX-NEXT: sw a0, 8(sp)
383 ; RV32ZDINX-NEXT: sw a2, 0(sp)
384 ; RV32ZDINX-NEXT: sw a3, 4(sp)
385 ; RV32ZDINX-NEXT: lw a0, 4(sp)
386 ; RV32ZDINX-NEXT: lui a1, %hi(e)
387 ; RV32ZDINX-NEXT: addi a2, a1, %lo(e)
388 ; RV32ZDINX-NEXT: sw a0, 4(a2)
389 ; RV32ZDINX-NEXT: lw a0, 0(sp)
390 ; RV32ZDINX-NEXT: sw a0, %lo(e)(a1)
391 ; RV32ZDINX-NEXT: addi sp, sp, 16
392 ; RV32ZDINX-NEXT: ret
394 ; RV32ZDINXUALIGNED-LABEL: foo10:
395 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
396 ; RV32ZDINXUALIGNED-NEXT: addi sp, sp, -16
397 ; RV32ZDINXUALIGNED-NEXT: addi a1, a0, 2047
398 ; RV32ZDINXUALIGNED-NEXT: lw a2, -3(a1)
399 ; RV32ZDINXUALIGNED-NEXT: lw a3, 1(a1)
400 ; RV32ZDINXUALIGNED-NEXT: sw a0, 8(sp)
401 ; RV32ZDINXUALIGNED-NEXT: lui a0, %hi(e)
402 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, %lo(e)
403 ; RV32ZDINXUALIGNED-NEXT: sw a2, 0(a0)
404 ; RV32ZDINXUALIGNED-NEXT: sw a3, 4(a0)
405 ; RV32ZDINXUALIGNED-NEXT: addi sp, sp, 16
406 ; RV32ZDINXUALIGNED-NEXT: ret
408 ; RV64ZDINX-LABEL: foo10:
409 ; RV64ZDINX: # %bb.0: # %entry
410 ; RV64ZDINX-NEXT: addi sp, sp, -16
411 ; RV64ZDINX-NEXT: ld a1, 2044(a0)
412 ; RV64ZDINX-NEXT: sd a0, 8(sp)
413 ; RV64ZDINX-NEXT: lui a0, %hi(e)
414 ; RV64ZDINX-NEXT: sw a1, %lo(e)(a0)
415 ; RV64ZDINX-NEXT: addi a0, a0, %lo(e)
416 ; RV64ZDINX-NEXT: srli a1, a1, 32
417 ; RV64ZDINX-NEXT: sw a1, 4(a0)
418 ; RV64ZDINX-NEXT: addi sp, sp, 16
419 ; RV64ZDINX-NEXT: ret
421 %p.addr = alloca ptr, align 8
422 store ptr %p, ptr %p.addr, align 8
423 %0 = load ptr, ptr %p.addr, align 8
424 %add.ptr = getelementptr inbounds i8, ptr %0, i64 2044
425 %1 = load double, ptr %add.ptr, align 8
426 store double %1, ptr @e, align 4
; Offset 4092: pair offsets 4092/4096 cannot share one simm12 base, so RV32
; adds lui 1 (= 4096) and stores at -4/0. RV64 reaches 4092 as 2047+2045.
430 define void @foo11(ptr nocapture %p, double %d) nounwind {
431 ; RV32ZDINX-LABEL: foo11:
432 ; RV32ZDINX: # %bb.0: # %entry
433 ; RV32ZDINX-NEXT: mv a3, a2
434 ; RV32ZDINX-NEXT: lui a2, 1
435 ; RV32ZDINX-NEXT: add a0, a0, a2
436 ; RV32ZDINX-NEXT: mv a2, a1
437 ; RV32ZDINX-NEXT: sw a2, -4(a0)
438 ; RV32ZDINX-NEXT: sw a3, 0(a0)
439 ; RV32ZDINX-NEXT: ret
441 ; RV32ZDINXUALIGNED-LABEL: foo11:
442 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
443 ; RV32ZDINXUALIGNED-NEXT: mv a3, a2
444 ; RV32ZDINXUALIGNED-NEXT: lui a2, 1
445 ; RV32ZDINXUALIGNED-NEXT: add a0, a0, a2
446 ; RV32ZDINXUALIGNED-NEXT: mv a2, a1
447 ; RV32ZDINXUALIGNED-NEXT: sw a2, -4(a0)
448 ; RV32ZDINXUALIGNED-NEXT: sw a3, 0(a0)
449 ; RV32ZDINXUALIGNED-NEXT: ret
451 ; RV64ZDINX-LABEL: foo11:
452 ; RV64ZDINX: # %bb.0: # %entry
453 ; RV64ZDINX-NEXT: addi a0, a0, 2047
454 ; RV64ZDINX-NEXT: sd a1, 2045(a0)
455 ; RV64ZDINX-NEXT: ret
457 %add.ptr = getelementptr inbounds i8, ptr %p, i64 4092
458 store double %d, ptr %add.ptr, align 8
; Large offset 10239: materialized as lui 2 (= 8192) + addi 2047 on RV32 so
; both halves sit at 0/4 from the new base; RV64 uses lui 2 + sd 2047.
462 define void @foo12(ptr nocapture %p, double %d) nounwind {
463 ; RV32ZDINX-LABEL: foo12:
464 ; RV32ZDINX: # %bb.0: # %entry
465 ; RV32ZDINX-NEXT: mv a3, a2
466 ; RV32ZDINX-NEXT: lui a2, 2
467 ; RV32ZDINX-NEXT: addi a2, a2, 2047
468 ; RV32ZDINX-NEXT: add a0, a0, a2
469 ; RV32ZDINX-NEXT: mv a2, a1
470 ; RV32ZDINX-NEXT: sw a2, 0(a0)
471 ; RV32ZDINX-NEXT: sw a3, 4(a0)
472 ; RV32ZDINX-NEXT: ret
474 ; RV32ZDINXUALIGNED-LABEL: foo12:
475 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
476 ; RV32ZDINXUALIGNED-NEXT: mv a3, a2
477 ; RV32ZDINXUALIGNED-NEXT: lui a2, 2
478 ; RV32ZDINXUALIGNED-NEXT: addi a2, a2, 2047
479 ; RV32ZDINXUALIGNED-NEXT: add a0, a0, a2
480 ; RV32ZDINXUALIGNED-NEXT: mv a2, a1
481 ; RV32ZDINXUALIGNED-NEXT: sw a2, 0(a0)
482 ; RV32ZDINXUALIGNED-NEXT: sw a3, 4(a0)
483 ; RV32ZDINXUALIGNED-NEXT: ret
485 ; RV64ZDINX-LABEL: foo12:
486 ; RV64ZDINX: # %bb.0: # %entry
487 ; RV64ZDINX-NEXT: lui a2, 2
488 ; RV64ZDINX-NEXT: add a0, a0, a2
489 ; RV64ZDINX-NEXT: sd a1, 2047(a0)
490 ; RV64ZDINX-NEXT: ret
492 %add.ptr = getelementptr inbounds i8, ptr %p, i64 10239
493 store double %d, ptr %add.ptr, align 8
; Over-aligned (16-byte) double global for the interior-offset load tests.
497 @f = global double 4.2, align 16
; Return the align-4 double at @f+4. Base RV32 assembles the a0/a1 result
; pair through a stack slot; UALIGNED reads the two words directly; RV64
; combines lwu %lo(f+4) and lwu %lo(f+8) with slli/or.
499 define double @foo13(ptr nocapture %p) nounwind {
500 ; RV32ZDINX-LABEL: foo13:
501 ; RV32ZDINX: # %bb.0: # %entry
502 ; RV32ZDINX-NEXT: addi sp, sp, -16
503 ; RV32ZDINX-NEXT: lui a0, %hi(f)
504 ; RV32ZDINX-NEXT: lw a1, %lo(f+8)(a0)
505 ; RV32ZDINX-NEXT: sw a1, 12(sp)
506 ; RV32ZDINX-NEXT: lw a0, %lo(f+4)(a0)
507 ; RV32ZDINX-NEXT: sw a0, 8(sp)
508 ; RV32ZDINX-NEXT: lw a0, 8(sp)
509 ; RV32ZDINX-NEXT: lw a1, 12(sp)
510 ; RV32ZDINX-NEXT: addi sp, sp, 16
511 ; RV32ZDINX-NEXT: ret
513 ; RV32ZDINXUALIGNED-LABEL: foo13:
514 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
515 ; RV32ZDINXUALIGNED-NEXT: lui a0, %hi(f)
516 ; RV32ZDINXUALIGNED-NEXT: addi a0, a0, %lo(f)
517 ; RV32ZDINXUALIGNED-NEXT: lw a1, 8(a0)
518 ; RV32ZDINXUALIGNED-NEXT: lw a0, 4(a0)
519 ; RV32ZDINXUALIGNED-NEXT: ret
521 ; RV64ZDINX-LABEL: foo13:
522 ; RV64ZDINX: # %bb.0: # %entry
523 ; RV64ZDINX-NEXT: lui a0, %hi(f)
524 ; RV64ZDINX-NEXT: lwu a1, %lo(f+8)(a0)
525 ; RV64ZDINX-NEXT: lwu a0, %lo(f+4)(a0)
526 ; RV64ZDINX-NEXT: slli a1, a1, 32
527 ; RV64ZDINX-NEXT: or a0, a1, a0
528 ; RV64ZDINX-NEXT: ret
530 %add.ptr = getelementptr inbounds i8, ptr @f, i64 4
531 %0 = load double, ptr %add.ptr, align 4
; Return the aligned double at @f+8: folds straight into %lo(f+8)/%lo(f+12)
; word loads on RV32 and a single ld on RV64, with no rebasing needed.
; NOTE(review): this definition appears truncated at the end of the visible
; chunk (its ret/closing brace are not shown).
535 define double @foo14(ptr nocapture %p) nounwind {
536 ; RV32ZDINX-LABEL: foo14:
537 ; RV32ZDINX: # %bb.0: # %entry
538 ; RV32ZDINX-NEXT: lui a0, %hi(f)
539 ; RV32ZDINX-NEXT: lw a1, %lo(f+12)(a0)
540 ; RV32ZDINX-NEXT: lw a0, %lo(f+8)(a0)
541 ; RV32ZDINX-NEXT: ret
543 ; RV32ZDINXUALIGNED-LABEL: foo14:
544 ; RV32ZDINXUALIGNED: # %bb.0: # %entry
545 ; RV32ZDINXUALIGNED-NEXT: lui a0, %hi(f)
546 ; RV32ZDINXUALIGNED-NEXT: lw a1, %lo(f+12)(a0)
547 ; RV32ZDINXUALIGNED-NEXT: lw a0, %lo(f+8)(a0)
548 ; RV32ZDINXUALIGNED-NEXT: ret
550 ; RV64ZDINX-LABEL: foo14:
551 ; RV64ZDINX: # %bb.0: # %entry
552 ; RV64ZDINX-NEXT: lui a0, %hi(f)
553 ; RV64ZDINX-NEXT: ld a0, %lo(f+8)(a0)
554 ; RV64ZDINX-NEXT: ret
556 %add.ptr = getelementptr inbounds i8, ptr @f, i64 8
557 %0 = load double, ptr %add.ptr, align 8