1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
3 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
4 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
5 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
8 define i8 @test_load_i8(i8* %p1) {
13 define i16 @test_load_i16(i16* %p1) {
14 %r = load i16, i16* %p1
18 define i32 @test_load_i32(i32* %p1) {
19 %r = load i32, i32* %p1
23 define i64 @test_load_i64(i64* %p1) {
24 %r = load i64, i64* %p1
28 define float @test_load_float(float* %p1) {
29 %r = load float, float* %p1
33 define float @test_load_float_vecreg(float* %p1) {
34 %r = load float, float* %p1
38 define double @test_load_double(double* %p1) {
39 %r = load double, double* %p1
43 define double @test_load_double_vecreg(double* %p1) {
44 %r = load double, double* %p1
48 define i32* @test_store_i32(i32 %val, i32* %p1) {
49 store i32 %val, i32* %p1
53 define i64* @test_store_i64(i64 %val, i64* %p1) {
54 store i64 %val, i64* %p1
58 define float* @test_store_float(float %val, float* %p1) {
59 store float %val, float* %p1
63 define float* @test_store_float_vec(float %val, float* %p1) {
64 store float %val, float* %p1
68 define double* @test_store_double(double %val, double* %p1) {
69 store double %val, double* %p1
73 define double* @test_store_double_vec(double %val, double* %p1) {
74 store double %val, double* %p1
78 define i32* @test_load_ptr(i32** %ptr1) {
79 %p = load i32*, i32** %ptr1
83 define void @test_store_ptr(i32** %ptr1, i32* %a) {
84 store i32* %a, i32** %ptr1
88 define i32 @test_gep_folding(i32* %arr, i32 %val) {
89 %arrayidx = getelementptr i32, i32* %arr, i32 5
90 store i32 %val, i32* %arrayidx
91 %r = load i32, i32* %arrayidx
95 define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) #0 {
96 %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
97 store i32 %val, i32* %arrayidx
98 %r = load i32, i32* %arrayidx
106 regBankSelected: true
108 - { id: 0, class: gpr }
109 - { id: 1, class: gpr }
114 ; SSE-LABEL: name: test_load_i8
115 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
116 ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
117 ; SSE: $al = COPY [[MOV8rm]]
118 ; SSE: RET 0, implicit $al
119 ; AVX-LABEL: name: test_load_i8
120 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
121 ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
122 ; AVX: $al = COPY [[MOV8rm]]
123 ; AVX: RET 0, implicit $al
124 ; AVX512F-LABEL: name: test_load_i8
125 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
126 ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
127 ; AVX512F: $al = COPY [[MOV8rm]]
128 ; AVX512F: RET 0, implicit $al
129 ; AVX512VL-LABEL: name: test_load_i8
130 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
131 ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
132 ; AVX512VL: $al = COPY [[MOV8rm]]
133 ; AVX512VL: RET 0, implicit $al
135 %1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
144 regBankSelected: true
146 - { id: 0, class: gpr }
147 - { id: 1, class: gpr }
152 ; SSE-LABEL: name: test_load_i16
153 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
154 ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
155 ; SSE: $ax = COPY [[MOV16rm]]
156 ; SSE: RET 0, implicit $ax
157 ; AVX-LABEL: name: test_load_i16
158 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
159 ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
160 ; AVX: $ax = COPY [[MOV16rm]]
161 ; AVX: RET 0, implicit $ax
162 ; AVX512F-LABEL: name: test_load_i16
163 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
164 ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
165 ; AVX512F: $ax = COPY [[MOV16rm]]
166 ; AVX512F: RET 0, implicit $ax
167 ; AVX512VL-LABEL: name: test_load_i16
168 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
169 ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
170 ; AVX512VL: $ax = COPY [[MOV16rm]]
171 ; AVX512VL: RET 0, implicit $ax
173 %1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
182 regBankSelected: true
184 - { id: 0, class: gpr }
185 - { id: 1, class: gpr }
190 ; SSE-LABEL: name: test_load_i32
191 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
192 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
193 ; SSE: $eax = COPY [[MOV32rm]]
194 ; SSE: RET 0, implicit $eax
195 ; AVX-LABEL: name: test_load_i32
196 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
197 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
198 ; AVX: $eax = COPY [[MOV32rm]]
199 ; AVX: RET 0, implicit $eax
200 ; AVX512F-LABEL: name: test_load_i32
201 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
202 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
203 ; AVX512F: $eax = COPY [[MOV32rm]]
204 ; AVX512F: RET 0, implicit $eax
205 ; AVX512VL-LABEL: name: test_load_i32
206 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
207 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
208 ; AVX512VL: $eax = COPY [[MOV32rm]]
209 ; AVX512VL: RET 0, implicit $eax
211 %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
220 regBankSelected: true
222 - { id: 0, class: gpr }
223 - { id: 1, class: gpr }
228 ; SSE-LABEL: name: test_load_i64
229 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
230 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
231 ; SSE: $rax = COPY [[MOV64rm]]
232 ; SSE: RET 0, implicit $rax
233 ; AVX-LABEL: name: test_load_i64
234 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
235 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
236 ; AVX: $rax = COPY [[MOV64rm]]
237 ; AVX: RET 0, implicit $rax
238 ; AVX512F-LABEL: name: test_load_i64
239 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
240 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
241 ; AVX512F: $rax = COPY [[MOV64rm]]
242 ; AVX512F: RET 0, implicit $rax
243 ; AVX512VL-LABEL: name: test_load_i64
244 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
245 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
246 ; AVX512VL: $rax = COPY [[MOV64rm]]
247 ; AVX512VL: RET 0, implicit $rax
249 %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
255 name: test_load_float
258 regBankSelected: true
260 - { id: 0, class: gpr, preferred-register: '' }
261 - { id: 1, class: gpr, preferred-register: '' }
262 - { id: 2, class: vecr, preferred-register: '' }
263 - { id: 3, class: vecr, preferred-register: '' }
268 ; SSE-LABEL: name: test_load_float
269 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
270 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
271 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
272 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
273 ; SSE: $xmm0 = COPY [[COPY2]]
274 ; SSE: RET 0, implicit $xmm0
275 ; AVX-LABEL: name: test_load_float
276 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
277 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
278 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
279 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
280 ; AVX: $xmm0 = COPY [[COPY2]]
281 ; AVX: RET 0, implicit $xmm0
282 ; AVX512F-LABEL: name: test_load_float
283 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
284 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
285 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
286 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
287 ; AVX512F: $xmm0 = COPY [[COPY2]]
288 ; AVX512F: RET 0, implicit $xmm0
289 ; AVX512VL-LABEL: name: test_load_float
290 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
291 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
292 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
293 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
294 ; AVX512VL: $xmm0 = COPY [[COPY2]]
295 ; AVX512VL: RET 0, implicit $xmm0
296 %0:gpr(p0) = COPY $rdi
297 %1:gpr(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
298 %3:vecr(s32) = COPY %1(s32)
299 %2:vecr(s128) = G_ANYEXT %3(s32)
300 $xmm0 = COPY %2(s128)
301 RET 0, implicit $xmm0
305 name: test_load_float_vecreg
308 regBankSelected: true
310 - { id: 0, class: gpr, preferred-register: '' }
311 - { id: 1, class: gpr, preferred-register: '' }
312 - { id: 2, class: vecr, preferred-register: '' }
313 - { id: 3, class: vecr, preferred-register: '' }
318 ; SSE-LABEL: name: test_load_float_vecreg
319 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
320 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
321 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
322 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
323 ; SSE: $xmm0 = COPY [[COPY2]]
324 ; SSE: RET 0, implicit $xmm0
325 ; AVX-LABEL: name: test_load_float_vecreg
326 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
327 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
328 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
329 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
330 ; AVX: $xmm0 = COPY [[COPY2]]
331 ; AVX: RET 0, implicit $xmm0
332 ; AVX512F-LABEL: name: test_load_float_vecreg
333 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
334 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
335 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
336 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
337 ; AVX512F: $xmm0 = COPY [[COPY2]]
338 ; AVX512F: RET 0, implicit $xmm0
339 ; AVX512VL-LABEL: name: test_load_float_vecreg
340 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
341 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
342 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
343 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
344 ; AVX512VL: $xmm0 = COPY [[COPY2]]
345 ; AVX512VL: RET 0, implicit $xmm0
346 %0:gpr(p0) = COPY $rdi
347 %1:gpr(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
348 %3:vecr(s32) = COPY %1(s32)
349 %2:vecr(s128) = G_ANYEXT %3(s32)
350 $xmm0 = COPY %2(s128)
351 RET 0, implicit $xmm0
355 name: test_load_double
358 regBankSelected: true
360 - { id: 0, class: gpr, preferred-register: '' }
361 - { id: 1, class: gpr, preferred-register: '' }
362 - { id: 2, class: vecr, preferred-register: '' }
363 - { id: 3, class: vecr, preferred-register: '' }
368 ; SSE-LABEL: name: test_load_double
369 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
370 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
371 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
372 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
373 ; SSE: $xmm0 = COPY [[COPY2]]
374 ; SSE: RET 0, implicit $xmm0
375 ; AVX-LABEL: name: test_load_double
376 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
377 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
378 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
379 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
380 ; AVX: $xmm0 = COPY [[COPY2]]
381 ; AVX: RET 0, implicit $xmm0
382 ; AVX512F-LABEL: name: test_load_double
383 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
384 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
385 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
386 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
387 ; AVX512F: $xmm0 = COPY [[COPY2]]
388 ; AVX512F: RET 0, implicit $xmm0
389 ; AVX512VL-LABEL: name: test_load_double
390 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
391 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
392 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
393 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
394 ; AVX512VL: $xmm0 = COPY [[COPY2]]
395 ; AVX512VL: RET 0, implicit $xmm0
396 %0:gpr(p0) = COPY $rdi
397 %1:gpr(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
398 %3:vecr(s64) = COPY %1(s64)
399 %2:vecr(s128) = G_ANYEXT %3(s64)
400 $xmm0 = COPY %2(s128)
401 RET 0, implicit $xmm0
405 name: test_load_double_vecreg
408 regBankSelected: true
410 - { id: 0, class: gpr, preferred-register: '' }
411 - { id: 1, class: gpr, preferred-register: '' }
412 - { id: 2, class: vecr, preferred-register: '' }
413 - { id: 3, class: vecr, preferred-register: '' }
418 ; SSE-LABEL: name: test_load_double_vecreg
419 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
420 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
421 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
422 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
423 ; SSE: $xmm0 = COPY [[COPY2]]
424 ; SSE: RET 0, implicit $xmm0
425 ; AVX-LABEL: name: test_load_double_vecreg
426 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
427 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
428 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
429 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
430 ; AVX: $xmm0 = COPY [[COPY2]]
431 ; AVX: RET 0, implicit $xmm0
432 ; AVX512F-LABEL: name: test_load_double_vecreg
433 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
434 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
435 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
436 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
437 ; AVX512F: $xmm0 = COPY [[COPY2]]
438 ; AVX512F: RET 0, implicit $xmm0
439 ; AVX512VL-LABEL: name: test_load_double_vecreg
440 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
441 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
442 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
443 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
444 ; AVX512VL: $xmm0 = COPY [[COPY2]]
445 ; AVX512VL: RET 0, implicit $xmm0
446 %0:gpr(p0) = COPY $rdi
447 %1:gpr(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
448 %3:vecr(s64) = COPY %1(s64)
449 %2:vecr(s128) = G_ANYEXT %3(s64)
450 $xmm0 = COPY %2(s128)
451 RET 0, implicit $xmm0
458 regBankSelected: true
460 - { id: 0, class: gpr }
461 - { id: 1, class: gpr }
466 ; SSE-LABEL: name: test_store_i32
467 ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi
468 ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
469 ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
470 ; SSE: $rax = COPY [[COPY1]]
471 ; SSE: RET 0, implicit $rax
472 ; AVX-LABEL: name: test_store_i32
473 ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi
474 ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
475 ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
476 ; AVX: $rax = COPY [[COPY1]]
477 ; AVX: RET 0, implicit $rax
478 ; AVX512F-LABEL: name: test_store_i32
479 ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi
480 ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
481 ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
482 ; AVX512F: $rax = COPY [[COPY1]]
483 ; AVX512F: RET 0, implicit $rax
484 ; AVX512VL-LABEL: name: test_store_i32
485 ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
486 ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
487 ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
488 ; AVX512VL: $rax = COPY [[COPY1]]
489 ; AVX512VL: RET 0, implicit $rax
492 G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
501 regBankSelected: true
503 - { id: 0, class: gpr }
504 - { id: 1, class: gpr }
509 ; SSE-LABEL: name: test_store_i64
510 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
511 ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
512 ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
513 ; SSE: $rax = COPY [[COPY1]]
514 ; SSE: RET 0, implicit $rax
515 ; AVX-LABEL: name: test_store_i64
516 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
517 ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
518 ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
519 ; AVX: $rax = COPY [[COPY1]]
520 ; AVX: RET 0, implicit $rax
521 ; AVX512F-LABEL: name: test_store_i64
522 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
523 ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
524 ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
525 ; AVX512F: $rax = COPY [[COPY1]]
526 ; AVX512F: RET 0, implicit $rax
527 ; AVX512VL-LABEL: name: test_store_i64
528 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
529 ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
530 ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
531 ; AVX512VL: $rax = COPY [[COPY1]]
532 ; AVX512VL: RET 0, implicit $rax
535 G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
541 name: test_store_float
544 regBankSelected: true
546 - { id: 0, class: vecr, preferred-register: '' }
547 - { id: 1, class: gpr, preferred-register: '' }
548 - { id: 2, class: vecr, preferred-register: '' }
549 - { id: 3, class: gpr, preferred-register: '' }
554 ; SSE-LABEL: name: test_store_float
555 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
556 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
557 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
558 ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
559 ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
560 ; SSE: $rax = COPY [[COPY2]]
561 ; SSE: RET 0, implicit $rax
562 ; AVX-LABEL: name: test_store_float
563 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
564 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
565 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
566 ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
567 ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
568 ; AVX: $rax = COPY [[COPY2]]
569 ; AVX: RET 0, implicit $rax
570 ; AVX512F-LABEL: name: test_store_float
571 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
572 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
573 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
574 ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
575 ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
576 ; AVX512F: $rax = COPY [[COPY2]]
577 ; AVX512F: RET 0, implicit $rax
578 ; AVX512VL-LABEL: name: test_store_float
579 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
580 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
581 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
582 ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
583 ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
584 ; AVX512VL: $rax = COPY [[COPY2]]
585 ; AVX512VL: RET 0, implicit $rax
586 %2:vecr(s128) = COPY $xmm0
587 %0:vecr(s32) = G_TRUNC %2(s128)
588 %1:gpr(p0) = COPY $rdi
589 %3:gpr(s32) = COPY %0(s32)
590 G_STORE %3(s32), %1(p0) :: (store 4 into %ir.p1)
596 name: test_store_float_vec
599 regBankSelected: true
601 - { id: 0, class: vecr, preferred-register: '' }
602 - { id: 1, class: gpr, preferred-register: '' }
603 - { id: 2, class: vecr, preferred-register: '' }
604 - { id: 3, class: gpr, preferred-register: '' }
609 ; SSE-LABEL: name: test_store_float_vec
610 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
611 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
612 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
613 ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
614 ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
615 ; SSE: $rax = COPY [[COPY2]]
616 ; SSE: RET 0, implicit $rax
617 ; AVX-LABEL: name: test_store_float_vec
618 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
619 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
620 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
621 ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
622 ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
623 ; AVX: $rax = COPY [[COPY2]]
624 ; AVX: RET 0, implicit $rax
625 ; AVX512F-LABEL: name: test_store_float_vec
626 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
627 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
628 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
629 ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
630 ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
631 ; AVX512F: $rax = COPY [[COPY2]]
632 ; AVX512F: RET 0, implicit $rax
633 ; AVX512VL-LABEL: name: test_store_float_vec
634 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
635 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
636 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
637 ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
638 ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
639 ; AVX512VL: $rax = COPY [[COPY2]]
640 ; AVX512VL: RET 0, implicit $rax
641 %2:vecr(s128) = COPY $xmm0
642 %0:vecr(s32) = G_TRUNC %2(s128)
643 %1:gpr(p0) = COPY $rdi
644 %3:gpr(s32) = COPY %0(s32)
645 G_STORE %3(s32), %1(p0) :: (store 4 into %ir.p1)
651 name: test_store_double
654 regBankSelected: true
656 - { id: 0, class: vecr, preferred-register: '' }
657 - { id: 1, class: gpr, preferred-register: '' }
658 - { id: 2, class: vecr, preferred-register: '' }
659 - { id: 3, class: gpr, preferred-register: '' }
660 # NOTE: Stale hand-written check — no RUN line passes the 'NO_AVX512X' prefix
660 # to FileCheck, so the line below is never verified; the autogenerated
660 # SSE/AVX/AVX512F/AVX512VL checks cover this case. Kept for reference.
660 # NO_AVX512X: %0:fr64 = COPY $xmm0
665 ; SSE-LABEL: name: test_store_double
666 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
667 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
668 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
669 ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
670 ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
671 ; SSE: $rax = COPY [[COPY2]]
672 ; SSE: RET 0, implicit $rax
673 ; AVX-LABEL: name: test_store_double
674 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
675 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
676 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
677 ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
678 ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
679 ; AVX: $rax = COPY [[COPY2]]
680 ; AVX: RET 0, implicit $rax
681 ; AVX512F-LABEL: name: test_store_double
682 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
683 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
684 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
685 ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
686 ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
687 ; AVX512F: $rax = COPY [[COPY2]]
688 ; AVX512F: RET 0, implicit $rax
689 ; AVX512VL-LABEL: name: test_store_double
690 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
691 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
692 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
693 ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
694 ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
695 ; AVX512VL: $rax = COPY [[COPY2]]
696 ; AVX512VL: RET 0, implicit $rax
697 %2:vecr(s128) = COPY $xmm0
698 %0:vecr(s64) = G_TRUNC %2(s128)
699 %1:gpr(p0) = COPY $rdi
700 %3:gpr(s64) = COPY %0(s64)
701 G_STORE %3(s64), %1(p0) :: (store 8 into %ir.p1)
707 name: test_store_double_vec
710 regBankSelected: true
712 - { id: 0, class: vecr, preferred-register: '' }
713 - { id: 1, class: gpr, preferred-register: '' }
714 - { id: 2, class: vecr, preferred-register: '' }
715 - { id: 3, class: gpr, preferred-register: '' }
720 ; SSE-LABEL: name: test_store_double_vec
721 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
722 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
723 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
724 ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
725 ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
726 ; SSE: $rax = COPY [[COPY2]]
727 ; SSE: RET 0, implicit $rax
728 ; AVX-LABEL: name: test_store_double_vec
729 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
730 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
731 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
732 ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
733 ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
734 ; AVX: $rax = COPY [[COPY2]]
735 ; AVX: RET 0, implicit $rax
736 ; AVX512F-LABEL: name: test_store_double_vec
737 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
738 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
739 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
740 ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
741 ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
742 ; AVX512F: $rax = COPY [[COPY2]]
743 ; AVX512F: RET 0, implicit $rax
744 ; AVX512VL-LABEL: name: test_store_double_vec
745 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
746 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
747 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
748 ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
749 ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
750 ; AVX512VL: $rax = COPY [[COPY2]]
751 ; AVX512VL: RET 0, implicit $rax
752 %2:vecr(s128) = COPY $xmm0
753 %0:vecr(s64) = G_TRUNC %2(s128)
754 %1:gpr(p0) = COPY $rdi
755 %3:gpr(s64) = COPY %0(s64)
756 G_STORE %3(s64), %1(p0) :: (store 8 into %ir.p1)
765 regBankSelected: true
768 - { id: 0, class: gpr }
769 - { id: 1, class: gpr }
774 ; SSE-LABEL: name: test_load_ptr
775 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
776 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
777 ; SSE: $rax = COPY [[MOV64rm]]
778 ; SSE: RET 0, implicit $rax
779 ; AVX-LABEL: name: test_load_ptr
780 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
781 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
782 ; AVX: $rax = COPY [[MOV64rm]]
783 ; AVX: RET 0, implicit $rax
784 ; AVX512F-LABEL: name: test_load_ptr
785 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
786 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
787 ; AVX512F: $rax = COPY [[MOV64rm]]
788 ; AVX512F: RET 0, implicit $rax
789 ; AVX512VL-LABEL: name: test_load_ptr
790 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
791 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
792 ; AVX512VL: $rax = COPY [[MOV64rm]]
793 ; AVX512VL: RET 0, implicit $rax
795 %1(p0) = G_LOAD %0(p0) :: (load 8 from %ir.ptr1)
804 regBankSelected: true
807 - { id: 0, class: gpr }
808 - { id: 1, class: gpr }
813 ; SSE-LABEL: name: test_store_ptr
814 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
815 ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
816 ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
818 ; AVX-LABEL: name: test_store_ptr
819 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
820 ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
821 ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
823 ; AVX512F-LABEL: name: test_store_ptr
824 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
825 ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
826 ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
828 ; AVX512VL-LABEL: name: test_store_ptr
829 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
830 ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
831 ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
835 G_STORE %1(p0), %0(p0) :: (store 8 into %ir.ptr1)
840 name: test_gep_folding
843 regBankSelected: true
845 - { id: 0, class: gpr }
846 - { id: 1, class: gpr }
847 - { id: 2, class: gpr }
848 - { id: 3, class: gpr }
849 - { id: 4, class: gpr }
854 ; SSE-LABEL: name: test_gep_folding
855 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
856 ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
857 ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
858 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
859 ; SSE: $eax = COPY [[MOV32rm]]
860 ; SSE: RET 0, implicit $eax
861 ; AVX-LABEL: name: test_gep_folding
862 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
863 ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
864 ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
865 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
866 ; AVX: $eax = COPY [[MOV32rm]]
867 ; AVX: RET 0, implicit $eax
868 ; AVX512F-LABEL: name: test_gep_folding
869 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
870 ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
871 ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
872 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
873 ; AVX512F: $eax = COPY [[MOV32rm]]
874 ; AVX512F: RET 0, implicit $eax
875 ; AVX512VL-LABEL: name: test_gep_folding
876 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
877 ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
878 ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
879 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
880 ; AVX512VL: $eax = COPY [[MOV32rm]]
881 ; AVX512VL: RET 0, implicit $eax
884 %2(s64) = G_CONSTANT i64 20
885 %3(p0) = G_GEP %0, %2(s64)
886 G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
887 %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
893 name: test_gep_folding_largeGepIndex
896 regBankSelected: true
898 - { id: 0, class: gpr }
899 - { id: 1, class: gpr }
900 - { id: 2, class: gpr }
901 - { id: 3, class: gpr }
902 - { id: 4, class: gpr }
907 ; SSE-LABEL: name: test_gep_folding_largeGepIndex
908 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
909 ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
910 ; SSE: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
911 ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
912 ; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
913 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
914 ; SSE: $eax = COPY [[MOV32rm]]
915 ; SSE: RET 0, implicit $eax
916 ; AVX-LABEL: name: test_gep_folding_largeGepIndex
917 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
918 ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
919 ; AVX: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
920 ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
921 ; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
922 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
923 ; AVX: $eax = COPY [[MOV32rm]]
924 ; AVX: RET 0, implicit $eax
925 ; AVX512F-LABEL: name: test_gep_folding_largeGepIndex
926 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
927 ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
928 ; AVX512F: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
929 ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
930 ; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
931 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
932 ; AVX512F: $eax = COPY [[MOV32rm]]
933 ; AVX512F: RET 0, implicit $eax
934 ; AVX512VL-LABEL: name: test_gep_folding_largeGepIndex
935 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
936 ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
937 ; AVX512VL: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
938 ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
939 ; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
940 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
941 ; AVX512VL: $eax = COPY [[MOV32rm]]
942 ; AVX512VL: RET 0, implicit $eax
945 %2(s64) = G_CONSTANT i64 228719476720
946 %3(p0) = G_GEP %0, %2(s64)
947 G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
948 %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)