1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -march=amdgcn -mcpu=tahiti -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=GCN,SI
3 ; RUN: llc < %s -march=amdgcn -mcpu=fiji -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=GCN,VI
5 ; Make sure high constant 0 isn't pointlessly materialized
; trunc(lshr(i64, 32)) -> i16 should fold to reading the high 32-bit
; register (v1) of the i64 pair directly: the GCN checks expect a single
; v_mov_b32 of v1 and no shift or constant materialization.
; NOTE(review): the function's `ret`/closing brace are not visible in this
; chunk — lines appear to have been dropped by extraction; verify upstream.
6 define i16 @trunc_bitcast_i64_lshr_32_i16(i64 %bar) {
7 ; GCN-LABEL: trunc_bitcast_i64_lshr_32_i16:
9 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
10 ; GCN-NEXT: v_mov_b32_e32 v0, v1
11 ; GCN-NEXT: s_setpc_b64 s[30:31]
12 %srl = lshr i64 %bar, 32
13 %trunc = trunc i64 %srl to i16
; Same pattern as above, but truncating to i32: the result is exactly the
; high 32-bit half of the i64 argument, so both SI and VI (shared GCN
; prefix) should emit only a v_mov_b32 of v1.
17 define i32 @trunc_bitcast_i64_lshr_32_i32(i64 %bar) {
18 ; GCN-LABEL: trunc_bitcast_i64_lshr_32_i32:
20 ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
21 ; GCN-NEXT: v_mov_b32_e32 v0, v1
22 ; GCN-NEXT: s_setpc_b64 s[30:31]
23 %srl = lshr i64 %bar, 32
24 %trunc = trunc i64 %srl to i32
</trunc>
; trunc(bitcast(<2 x i32> -> i64)) to i16 only depends on element 0 of the
; vector, so the inserted constant 99 in element 1 is dead and only one of
; the two loads needs to survive. SI lowers the remaining global load to a
; buffer_load with a 32-bit i_add; VI uses flat_load and a 16-bit u_add.
; NOTE(review): the loads are from undef/null addrspace(1) pointers — this
; is deliberate test scaffolding, not meaningful memory access.
28 define i16 @trunc_bitcast_v2i32_to_i16(<2 x i32> %bar) {
29 ; SI-LABEL: trunc_bitcast_v2i32_to_i16:
31 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
32 ; SI-NEXT: s_mov_b32 s7, 0xf000
33 ; SI-NEXT: s_mov_b32 s6, -1
34 ; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
35 ; SI-NEXT: s_waitcnt vmcnt(0)
36 ; SI-NEXT: v_add_i32_e32 v0, vcc, 4, v0
37 ; SI-NEXT: s_setpc_b64 s[30:31]
39 ; VI-LABEL: trunc_bitcast_v2i32_to_i16:
41 ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
42 ; VI-NEXT: flat_load_dword v0, v[0:1]
43 ; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
44 ; VI-NEXT: v_add_u16_e32 v0, 4, v0
45 ; VI-NEXT: s_setpc_b64 s[30:31]
46 %load0 = load i32, i32 addrspace(1)* undef
47 %load1 = load i32, i32 addrspace(1)* null
48 %insert.0 = insertelement <2 x i32> undef, i32 %load0, i32 0
49 %insert.1 = insertelement <2 x i32> %insert.0, i32 99, i32 1
50 %bc = bitcast <2 x i32> %insert.1 to i64
51 %trunc = trunc i64 %bc to i16
52 %add = add i16 %trunc, 4
56 ; Make sure there's no crash if the source vector type is FP
; Same shape as trunc_bitcast_v2i32_to_i16 but the bitcast source is
; <2 x float>; the expected codegen is identical (only element 0 is live,
; the float 4.0 in element 1 is dead). This is a no-crash regression test
; for the FP-typed source vector in the trunc(bitcast(...)) combine.
57 define i16 @trunc_bitcast_v2f32_to_i16(<2 x float> %bar) {
58 ; SI-LABEL: trunc_bitcast_v2f32_to_i16:
60 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
61 ; SI-NEXT: s_mov_b32 s7, 0xf000
62 ; SI-NEXT: s_mov_b32 s6, -1
63 ; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
64 ; SI-NEXT: s_waitcnt vmcnt(0)
65 ; SI-NEXT: v_add_i32_e32 v0, vcc, 4, v0
66 ; SI-NEXT: s_setpc_b64 s[30:31]
68 ; VI-LABEL: trunc_bitcast_v2f32_to_i16:
70 ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
71 ; VI-NEXT: flat_load_dword v0, v[0:1]
72 ; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
73 ; VI-NEXT: v_add_u16_e32 v0, 4, v0
74 ; VI-NEXT: s_setpc_b64 s[30:31]
75 %load0 = load float, float addrspace(1)* undef
76 %load1 = load float, float addrspace(1)* null
77 %insert.0 = insertelement <2 x float> undef, float %load0, i32 0
78 %insert.1 = insertelement <2 x float> %insert.0, float 4.0, i32 1
79 %bc = bitcast <2 x float> %insert.1 to i64
80 %trunc = trunc i64 %bc to i16
81 %add = add i16 %trunc, 4
; Kernel version of the high-element truncate pattern: sext two i16 loads,
; multiply the element-0 lanes, shift the <2 x i32> right by 16 and
; truncate back to <2 x i16>. Element 1 of the stored vector is undef, so
; the checks expect a single dword store of the mul>>16 result, with the
; sign-extends done as scalar s_sext_i32_i16 and a v_mul_i32_i24 multiply.
; NOTE(review): GEP indices are `i64 undef` — deliberate, this test only
; cares about the value computation, not the addressing.
85 define amdgpu_kernel void @truncate_high_elt_extract_vector(<2 x i16> addrspace(1)* nocapture readonly %arg, <2 x i16> addrspace(1)* nocapture readonly %arg1, <2 x i16> addrspace(1)* nocapture %arg2) local_unnamed_addr {
86 ; SI-LABEL: truncate_high_elt_extract_vector:
88 ; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
89 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
90 ; SI-NEXT: s_mov_b32 s3, 0xf000
91 ; SI-NEXT: s_mov_b32 s2, -1
92 ; SI-NEXT: s_waitcnt lgkmcnt(0)
93 ; SI-NEXT: s_load_dword s4, s[4:5], 0x0
94 ; SI-NEXT: s_load_dword s5, s[6:7], 0x0
95 ; SI-NEXT: s_waitcnt lgkmcnt(0)
96 ; SI-NEXT: s_sext_i32_i16 s4, s4
97 ; SI-NEXT: s_sext_i32_i16 s5, s5
98 ; SI-NEXT: v_mov_b32_e32 v0, s4
99 ; SI-NEXT: v_mul_i32_i24_e32 v0, s5, v0
100 ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
101 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
104 ; VI-LABEL: truncate_high_elt_extract_vector:
106 ; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
107 ; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
108 ; VI-NEXT: s_waitcnt lgkmcnt(0)
109 ; VI-NEXT: s_load_dword s2, s[4:5], 0x0
110 ; VI-NEXT: s_load_dword s3, s[6:7], 0x0
111 ; VI-NEXT: v_mov_b32_e32 v0, s0
112 ; VI-NEXT: v_mov_b32_e32 v1, s1
113 ; VI-NEXT: s_waitcnt lgkmcnt(0)
114 ; VI-NEXT: s_sext_i32_i16 s0, s2
115 ; VI-NEXT: s_sext_i32_i16 s1, s3
116 ; VI-NEXT: v_mov_b32_e32 v2, s0
117 ; VI-NEXT: v_mul_i32_i24_e32 v2, s1, v2
118 ; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
119 ; VI-NEXT: flat_store_dword v[0:1], v2
122 %tmp = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %arg, i64 undef
123 %tmp3 = load <2 x i16>, <2 x i16> addrspace(1)* %tmp, align 4
124 %tmp4 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %arg1, i64 undef
125 %tmp5 = load <2 x i16>, <2 x i16> addrspace(1)* %tmp4, align 4
126 %tmp6 = sext <2 x i16> %tmp3 to <2 x i32>
127 %tmp7 = sext <2 x i16> %tmp5 to <2 x i32>
128 %tmp8 = extractelement <2 x i32> %tmp6, i64 0
129 %tmp9 = extractelement <2 x i32> %tmp7, i64 0
130 %tmp10 = mul nsw i32 %tmp9, %tmp8
131 %tmp11 = insertelement <2 x i32> undef, i32 %tmp10, i32 0
132 %tmp12 = insertelement <2 x i32> %tmp11, i32 undef, i32 1
133 %tmp13 = lshr <2 x i32> %tmp12, <i32 16, i32 16>
134 %tmp14 = trunc <2 x i32> %tmp13 to <2 x i16>
135 %tmp15 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %arg2, i64 undef
136 store <2 x i16> %tmp14, <2 x i16> addrspace(1)* %tmp15, align 4
140 define <2 x i16> @trunc_v2i64_arg_to_v2i16(<2 x i64> %arg0) #0 {
141 ; SI-LABEL: trunc_v2i64_arg_to_v2i16:
143 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
144 ; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
145 ; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
146 ; SI-NEXT: v_or_b32_e32 v0, v0, v1
147 ; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
148 ; SI-NEXT: s_setpc_b64 s[30:31]
150 ; VI-LABEL: trunc_v2i64_arg_to_v2i16:
152 ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
153 ; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
154 ; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
155 ; VI-NEXT: s_setpc_b64 s[30:31]
156 %trunc = trunc <2 x i64> %arg0 to <2 x i16>