1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s
3 # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
# 32-bit G_ATOMIC_CMPXCHG on a local (p3, addrspace 3) pointer.
# Per the CHECK lines, the legalizer leaves the generic instruction
# intact: same opcode, same (ptr, cmp, newval) operands, same MMO.
6 name: atomic_cmpxchg_local_i32
10 liveins: $sgpr0, $sgpr1, $sgpr2
11 ; CHECK-LABEL: name: atomic_cmpxchg_local_i32
12 ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
14 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
15 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
16 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
17 ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store seq_cst (s32), addrspace 3)
18 %0:_(p3) = COPY $sgpr0
19 %1:_(s32) = COPY $sgpr1
20 %2:_(s32) = COPY $sgpr2
21 %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst (s32), addrspace 3)
# G_ATOMIC_CMPXCHG on a local (p3, addrspace 3) pointer with a 64-bit
# memory operand; like the i32 case, the instruction is not rewritten.
# NOTE(review): the value registers are s32 while the MMO is (s64) —
# presumably intentional to exercise a mismatched memory size; confirm.
25 name: atomic_cmpxchg_local_i64
29 liveins: $sgpr0, $sgpr1, $sgpr2
30 ; CHECK-LABEL: name: atomic_cmpxchg_local_i64
31 ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
33 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $sgpr0
34 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
35 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
36 ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p3), [[COPY1]], [[COPY2]] :: (load store seq_cst (s64), addrspace 3)
37 %0:_(p3) = COPY $sgpr0
38 %1:_(s32) = COPY $sgpr1
39 %2:_(s32) = COPY $sgpr2
40 %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst (s64), addrspace 3)
# 32-bit G_ATOMIC_CMPXCHG on a global (p1, addrspace 1) pointer.
# Per the CHECK lines, the legalizer rewrites it to the target opcode
# G_AMDGPU_ATOMIC_CMPXCHG, packing the two data operands into a
# <2 x s32> G_BUILD_VECTOR. Note the packed order is (COPY2, COPY1),
# i.e. swapped relative to the generic instruction's (%1, %2) order.
44 name: atomic_cmpxchg_global_i32
48 liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
49 ; CHECK-LABEL: name: atomic_cmpxchg_global_i32
50 ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
52 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
53 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
54 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr3
55 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
56 ; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store seq_cst (s32), addrspace 1)
57 %0:_(p1) = COPY $sgpr0_sgpr1
58 %1:_(s32) = COPY $sgpr2
59 %2:_(s32) = COPY $sgpr3
60 %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst (s32), addrspace 1)
# G_ATOMIC_CMPXCHG on a global (p1, addrspace 1) pointer with a 64-bit
# MMO: rewritten to G_AMDGPU_ATOMIC_CMPXCHG with the data operands
# packed into a <2 x s32> G_BUILD_VECTOR in (COPY2, COPY1) order,
# mirroring the global_i32 case.
# NOTE(review): value registers are s32 while the MMO is (s64) —
# presumably intentional to exercise a mismatched memory size; confirm.
64 name: atomic_cmpxchg_global_i64
68 liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
69 ; CHECK-LABEL: name: atomic_cmpxchg_global_i64
70 ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
72 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
73 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
74 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr3
75 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
76 ; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p1), [[BUILD_VECTOR]] :: (load store seq_cst (s64), addrspace 1)
77 %0:_(p1) = COPY $sgpr0_sgpr1
78 %1:_(s32) = COPY $sgpr2
79 %2:_(s32) = COPY $sgpr3
80 %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst (s64), addrspace 1)
# 32-bit G_ATOMIC_CMPXCHG on a flat (p0, addrspace 0) pointer:
# rewritten to G_AMDGPU_ATOMIC_CMPXCHG with a <2 x s32> G_BUILD_VECTOR
# in (COPY2, COPY1) order, same as the global cases. The MMO prints
# without an addrspace qualifier since addrspace 0 is the default.
84 name: atomic_cmpxchg_flat_i32
88 liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
90 ; CHECK-LABEL: name: atomic_cmpxchg_flat_i32
91 ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
93 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $sgpr0_sgpr1
94 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
95 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr3
96 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
97 ; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p0), [[BUILD_VECTOR]] :: (load store seq_cst (s32))
98 %0:_(p0) = COPY $sgpr0_sgpr1
99 %1:_(s32) = COPY $sgpr2
100 %2:_(s32) = COPY $sgpr3
101 %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst (s32), addrspace 0)
# G_ATOMIC_CMPXCHG on a flat (p0, addrspace 0) pointer with a 64-bit
# MMO: rewritten to G_AMDGPU_ATOMIC_CMPXCHG with a <2 x s32>
# G_BUILD_VECTOR in (COPY2, COPY1) order, mirroring flat_i32.
# NOTE(review): value registers are s32 while the MMO is (s64) —
# presumably intentional to exercise a mismatched memory size; confirm.
105 name: atomic_cmpxchg_flat_i64
109 liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
111 ; CHECK-LABEL: name: atomic_cmpxchg_flat_i64
112 ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3
114 ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $sgpr0_sgpr1
115 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
116 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr3
117 ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY1]](s32)
118 ; CHECK-NEXT: [[AMDGPU_ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_AMDGPU_ATOMIC_CMPXCHG [[COPY]](p0), [[BUILD_VECTOR]] :: (load store seq_cst (s64))
119 %0:_(p0) = COPY $sgpr0_sgpr1
120 %1:_(s32) = COPY $sgpr2
121 %2:_(s32) = COPY $sgpr3
122 %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store seq_cst (s64), addrspace 0)