; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
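
; Check that an ftrunc of a value that is already integral folds away:
; trunc(rint(x)), trunc(ceil(x)), trunc(floor(x)), trunc(nearbyint(x)) and
; trunc(trunc(x)) should each lower to the single inner rounding instruction,
; whose result is stored directly.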

; GCN-LABEL: {{^}}combine_ftrunc_frint_f64:
; GCN: v_rndne_f64_e32 [[RND:v\[[0-9:]+\]]],
; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_frint_f64(ptr addrspace(1) %p) {
  %v = load double, ptr addrspace(1) %p, align 8
  %round = tail call double @llvm.rint.f64(double %v)
  %trunc = tail call double @llvm.trunc.f64(double %round)
  store double %trunc, ptr addrspace(1) %p, align 8
  ret void
}

; GCN-LABEL: {{^}}combine_ftrunc_frint_f32:
; GCN: v_rndne_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_frint_f32(ptr addrspace(1) %p) {
  %v = load float, ptr addrspace(1) %p, align 4
  %round = tail call float @llvm.rint.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, ptr addrspace(1) %p, align 4
  ret void
}

; GCN-LABEL: {{^}}combine_ftrunc_frint_v2f32:
; GCN: s_load_dwordx2 s[[[SRC1:[0-9]+]]:[[SRC2:[0-9]+]]]
; GCN-DAG: v_rndne_f32_e32 v[[RND1:[0-9]+]], s[[SRC1]]
; GCN-DAG: v_rndne_f32_e32 v[[RND2:[0-9]+]], s[[SRC2]]
; GCN: flat_store_dwordx2 v[{{[0-9:]+}}], v[[[RND1]]:[[RND2]]]
define amdgpu_kernel void @combine_ftrunc_frint_v2f32(ptr addrspace(1) %p) {
  %v = load <2 x float>, ptr addrspace(1) %p, align 8
  %round = tail call <2 x float> @llvm.rint.v2f32(<2 x float> %v)
  %trunc = tail call <2 x float> @llvm.trunc.v2f32(<2 x float> %round)
  store <2 x float> %trunc, ptr addrspace(1) %p, align 8
  ret void
}

; GCN-LABEL: {{^}}combine_ftrunc_fceil_f32:
; GCN: v_ceil_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_fceil_f32(ptr addrspace(1) %p) {
  %v = load float, ptr addrspace(1) %p, align 4
  %round = tail call float @llvm.ceil.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, ptr addrspace(1) %p, align 4
  ret void
}

; GCN-LABEL: {{^}}combine_ftrunc_ffloor_f32:
; GCN: v_floor_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_ffloor_f32(ptr addrspace(1) %p) {
  %v = load float, ptr addrspace(1) %p, align 4
  %round = tail call float @llvm.floor.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, ptr addrspace(1) %p, align 4
  ret void
}

; GCN-LABEL: {{^}}combine_ftrunc_fnearbyint_f32:
; GCN: v_rndne_f32_e32 [[RND:v[0-9]+]],
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_fnearbyint_f32(ptr addrspace(1) %p) {
  %v = load float, ptr addrspace(1) %p, align 4
  %round = tail call float @llvm.nearbyint.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, ptr addrspace(1) %p, align 4
  ret void
}

; GCN-LABEL: {{^}}combine_ftrunc_ftrunc_f32:
; GCN: s_load_dword [[SRC:s[0-9]+]],
; GCN: v_trunc_f32_e32 [[RND:v[0-9]+]], [[SRC]]
; GCN: flat_store_dword v[{{[0-9:]+}}], [[RND]]
define amdgpu_kernel void @combine_ftrunc_ftrunc_f32(ptr addrspace(1) %p) {
  %v = load float, ptr addrspace(1) %p, align 4
  %round = tail call float @llvm.trunc.f32(float %v)
  %trunc = tail call float @llvm.trunc.f32(float %round)
  store float %trunc, ptr addrspace(1) %p, align 4
  ret void
}

declare double @llvm.trunc.f64(double)
declare float @llvm.trunc.f32(float)
declare <2 x float> @llvm.trunc.v2f32(<2 x float>)
declare double @llvm.rint.f64(double)
declare float @llvm.rint.f32(float)
declare <2 x float> @llvm.rint.v2f32(<2 x float>)
declare float @llvm.ceil.f32(float)
declare float @llvm.floor.f32(float)
declare float @llvm.nearbyint.f32(float)