; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN %s
; Uniform case: sext_in_reg of an SGPR value selects the scalar
; s_sext_i32_i8 instruction.
define amdgpu_kernel void @uniform_sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; GCN-LABEL: uniform_sext_in_reg_i8_to_i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_add_i32 s2, s4, s5
; GCN-NEXT:    s_sext_i32_i8 s4, s2
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %c = add i32 %a, %b ; add to prevent folding into extload
  %shl = shl i32 %c, 24
  %ashr = ashr i32 %shl, 24
  store i32 %ashr, i32 addrspace(1)* %out, align 4
  ret void
}
; Divergent case: adding the workitem id makes the value divergent, so the
; sext_in_reg must select the VALU v_bfe_i32 instead of s_sext_i32_i8.
define amdgpu_kernel void @divergent_sext_in_reg_i8_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; GCN-LABEL: divergent_sext_in_reg_i8_to_i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_add_i32 s4, s4, s5
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 8
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %c = add i32 %a, %b ; add to prevent folding into extload
  %c.divergent = add i32 %c, %tid
  %shl = shl i32 %c.divergent, 24
  %ashr = ashr i32 %shl, 24
  store i32 %ashr, i32 addrspace(1)* %out, align 4
  ret void
}
; Uniform case, 16-bit width: selects the scalar s_sext_i32_i16 instruction.
define amdgpu_kernel void @uniform_sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; GCN-LABEL: uniform_sext_in_reg_i16_to_i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_add_i32 s2, s4, s5
; GCN-NEXT:    s_sext_i32_i16 s4, s2
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    v_mov_b32_e32 v0, s4
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %c = add i32 %a, %b ; add to prevent folding into extload
  %shl = shl i32 %c, 16
  %ashr = ashr i32 %shl, 16
  store i32 %ashr, i32 addrspace(1)* %out, align 4
  ret void
}
; Divergent case, 16-bit width: selects v_bfe_i32 with a 16-bit field width.
define amdgpu_kernel void @divergent_sext_in_reg_i16_to_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
; GCN-LABEL: divergent_sext_in_reg_i16_to_i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_add_i32 s4, s4, s5
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 16
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %c = add i32 %a, %b ; add to prevent folding into extload
  %c.divergent = add i32 %c, %tid
  %shl = shl i32 %c.divergent, 16
  %ashr = ashr i32 %shl, 16
  store i32 %ashr, i32 addrspace(1)* %out, align 4
  ret void
}
; Workitem id intrinsic used to force divergence in the divergent tests.
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }