# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=machine-scheduler -o - %s | FileCheck %s

---
# Check that the high-latency loads are both scheduled first, before the
# multiplies, despite the presence of a barrier in the function.
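# Issuing the two loads back to back lets their memory latencies overlap
# instead of serializing each load with its dependent multiply.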
name: test
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9

    ; CHECK-LABEL: name: test
    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: undef [[COPY:%[0-9]+]].sub3:vreg_128 = COPY $vgpr9
    ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub2:vreg_128 = COPY $vgpr8
    ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].sub1:vreg_128 = COPY $vgpr7
    ; CHECK-NEXT: undef [[COPY3:%[0-9]+]].sub0:vreg_128 = COPY $vgpr6
    ; CHECK-NEXT: undef [[COPY4:%[0-9]+]].sub3:vreg_128 = COPY $vgpr5
    ; CHECK-NEXT: undef [[COPY5:%[0-9]+]].sub2:vreg_128 = COPY $vgpr4
    ; CHECK-NEXT: undef [[COPY6:%[0-9]+]].sub1:vreg_64 = COPY $vgpr1
    ; CHECK-NEXT: [[COPY6:%[0-9]+]].sub0:vreg_64 = COPY $vgpr0
    ; CHECK-NEXT: undef [[COPY7:%[0-9]+]].sub1:vreg_128 = COPY $vgpr3
    ; CHECK-NEXT: undef [[COPY8:%[0-9]+]].sub0:vreg_128 = COPY $vgpr2
    ; CHECK-NEXT: undef [[V_READFIRSTLANE_B32_:%[0-9]+]].sub0:sgpr_128 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
    ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]].sub1:sgpr_128 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
    ; CHECK-NEXT: S_BARRIER
    ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]].sub2:sgpr_128 = V_READFIRSTLANE_B32 [[COPY5]].sub2, implicit $exec
    ; CHECK-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]].sub3:sgpr_128 = V_READFIRSTLANE_B32 [[COPY4]].sub3, implicit $exec
    ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[V_READFIRSTLANE_B32_]], 0, 0, 0, 0, implicit $exec
    ; CHECK-NEXT: undef [[V_READFIRSTLANE_B32_1:%[0-9]+]].sub0:sgpr_128 = V_READFIRSTLANE_B32 [[COPY3]].sub0, implicit $exec
    ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]].sub1:sgpr_128 = V_READFIRSTLANE_B32 [[COPY2]].sub1, implicit $exec
    ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]].sub2:sgpr_128 = V_READFIRSTLANE_B32 [[COPY1]].sub2, implicit $exec
    ; CHECK-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]].sub3:sgpr_128 = V_READFIRSTLANE_B32 [[COPY]].sub3, implicit $exec
    ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[V_READFIRSTLANE_B32_1]], 0, 0, 0, 0, implicit $exec
    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[BUFFER_LOAD_DWORD_OFFSET]], [[BUFFER_LOAD_DWORD_OFFSET]], implicit $exec
    ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[BUFFER_LOAD_DWORD_OFFSET1]], [[BUFFER_LOAD_DWORD_OFFSET1]], implicit $exec
    ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_MUL_LO_U32_e64_]], [[V_MUL_LO_U32_e64_1]], implicit $exec
    ; CHECK-NEXT: GLOBAL_STORE_DWORD [[COPY6]], [[V_ADD_U32_e32_]], 0, 0, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0
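    ; The input below places the barrier ahead of two descriptor-setup/load/multiply
    ; groups, with each load feeding its multiply directly.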
    undef %43.sub3:vreg_128 = COPY $vgpr9
    undef %42.sub2:vreg_128 = COPY $vgpr8
    undef %41.sub1:vreg_128 = COPY $vgpr7
    undef %26.sub0:vreg_128 = COPY $vgpr6
    undef %46.sub3:vreg_128 = COPY $vgpr5
    undef %45.sub2:vreg_128 = COPY $vgpr4
    undef %44.sub1:vreg_128 = COPY $vgpr3
    undef %32.sub0:vreg_128 = COPY $vgpr2
    undef %38.sub1:vreg_64 = COPY $vgpr1
    %38.sub0:vreg_64 = COPY $vgpr0

    S_BARRIER

    undef %33.sub0:sgpr_128 = V_READFIRSTLANE_B32 %32.sub0, implicit $exec
    %33.sub1:sgpr_128 = V_READFIRSTLANE_B32 %44.sub1, implicit $exec
    %33.sub2:sgpr_128 = V_READFIRSTLANE_B32 %45.sub2, implicit $exec
    %33.sub3:sgpr_128 = V_READFIRSTLANE_B32 %46.sub3, implicit $exec
    %15:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %33, 0, 0, 0, 0, implicit $exec
    %39:vgpr_32 = V_MUL_LO_U32_e64 %15, %15, implicit $exec

    undef %27.sub0:sgpr_128 = V_READFIRSTLANE_B32 %26.sub0, implicit $exec
    %27.sub1:sgpr_128 = V_READFIRSTLANE_B32 %41.sub1, implicit $exec
    %27.sub2:sgpr_128 = V_READFIRSTLANE_B32 %42.sub2, implicit $exec
    %27.sub3:sgpr_128 = V_READFIRSTLANE_B32 %43.sub3, implicit $exec
    %19:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %27, 0, 0, 0, 0, implicit $exec
    %40:vgpr_32 = V_MUL_LO_U32_e64 %19, %19, implicit $exec

    %23:vgpr_32 = V_ADD_U32_e32 %39, %40, implicit $exec
    GLOBAL_STORE_DWORD %38, %23, 0, 0, implicit $exec
    S_ENDPGM 0
...