; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 -stop-after=finalize-isel | FileCheck %s
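
; A v8f64 masked store with mask <1,1,1,1,1,1,0,0> has no single AVX2
; instruction, so it is split into two v4f64 halves: the low half's mask is
; all-ones and becomes a plain unmasked VMOVUPD store, while the high half
; keeps its partial <1,1,0,0> mask (materialized from the constant pool) and
; is stored with VMASKMOVPD at offset 32.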
define void @split_masked_store(ptr %0) {
  ; CHECK-LABEL: name: split_masked_store
  ; CHECK: bb.0 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $rdi
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
  ; CHECK-NEXT:   [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
  ; CHECK-NEXT:   [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool)
  ; CHECK-NEXT:   VMASKMOVPDYmr [[COPY]], 1, $noreg, 32, $noreg, killed [[VMOVAPSYrm]], [[AVX_SET0_]] :: (store unknown-size into %ir.0 + 32, align 8)
  ; CHECK-NEXT:   VMOVUPDYmr [[COPY]], 1, $noreg, 0, $noreg, [[AVX_SET0_]] :: (store (s256) into %ir.0, align 8)
  ; CHECK-NEXT:   RET 0
  call void @llvm.masked.store.v8f64.p0(<8 x double> zeroinitializer, ptr %0, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>)
  ret void
}
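
; The matching v8f64 masked load is split the same way: the low half's
; all-ones mask becomes an unmasked VMOVUPD load, and the high half is loaded
; with VMASKMOVPD. The poison passthrough means no blend of the masked-off
; lanes is needed; the two ymm halves are returned in $ymm0/$ymm1.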
define <8 x double> @split_masked_load(ptr %0) {
  ; CHECK-LABEL: name: split_masked_load
  ; CHECK: bb.0 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $rdi
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64 = COPY $rdi
  ; CHECK-NEXT:   [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool)
  ; CHECK-NEXT:   [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm killed [[VMOVAPSYrm]], [[COPY]], 1, $noreg, 32, $noreg :: (load unknown-size from %ir.0 + 32, align 8)
  ; CHECK-NEXT:   [[VMOVUPDYrm:%[0-9]+]]:vr256 = VMOVUPDYrm [[COPY]], 1, $noreg, 0, $noreg :: (load (s256) from %ir.0, align 8)
  ; CHECK-NEXT:   $ymm0 = COPY [[VMOVUPDYrm]]
  ; CHECK-NEXT:   $ymm1 = COPY [[VMASKMOVPDYrm]]
  ; CHECK-NEXT:   RET 0, $ymm0, $ymm1
  %x = call <8 x double> @llvm.masked.load.v8f64.p0(ptr %0, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>, <8 x double> poison)
  ret <8 x double> %x
}

declare <8 x double> @llvm.masked.load.v8f64.p0(ptr, i32, <8 x i1>, <8 x double>)
declare void @llvm.masked.store.v8f64.p0(<8 x double>, ptr, i32, <8 x i1>)