; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64--
; Truncate two <4 x i32> loads to <4 x i16>, add them, store and return the sum.
define<4 x i16> @func_16_32() {
  %F = load <4 x i32>, <4 x i32>* undef
  %G = trunc <4 x i32> %F to <4 x i16>
  %H = load <4 x i32>, <4 x i32>* undef
  %Y = trunc <4 x i32> %H to <4 x i16>
  %T = add <4 x i16> %Y, %G
  store <4 x i16>%T , <4 x i16>* undef
  ret <4 x i16> %T
}
; Truncate two <4 x i64> loads to <4 x i16>, xor them, store and return the result.
define<4 x i16> @func_16_64() {
  %F = load <4 x i64>, <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i16>
  %H = load <4 x i64>, <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i16>
  %T = xor <4 x i16> %Y, %G
  store <4 x i16>%T , <4 x i16>* undef
  ret <4 x i16> %T
}
; Truncate two <4 x i64> loads to <4 x i32>, or them, store and return the result.
define<4 x i32> @func_32_64() {
  %F = load <4 x i64>, <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i32>
  %H = load <4 x i64>, <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i32>
  %T = or <4 x i32> %Y, %G
  store <4 x i32>%T , <4 x i32>* undef
  ret <4 x i32> %T
}
; Truncate two <4 x i16> loads to <4 x i8>, add them, store and return the sum.
define<4 x i8> @func_8_16() {
  %F = load <4 x i16>, <4 x i16>* undef
  %G = trunc <4 x i16> %F to <4 x i8>
  %H = load <4 x i16>, <4 x i16>* undef
  %Y = trunc <4 x i16> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  store <4 x i8>%T , <4 x i8>* undef
  ret <4 x i8> %T
}
; Truncate two <4 x i32> loads to <4 x i8>, subtract, store and return the result.
define<4 x i8> @func_8_32() {
  %F = load <4 x i32>, <4 x i32>* undef
  %G = trunc <4 x i32> %F to <4 x i8>
  %H = load <4 x i32>, <4 x i32>* undef
  %Y = trunc <4 x i32> %H to <4 x i8>
  %T = sub <4 x i8> %Y, %G
  store <4 x i8>%T , <4 x i8>* undef
  ret <4 x i8> %T
}
; Truncate two <4 x i64> loads to <4 x i8>, add them, store and return the sum.
define<4 x i8> @func_8_64() {
  %F = load <4 x i64>, <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i8>
  %H = load <4 x i64>, <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  store <4 x i8>%T , <4 x i8>* undef
  ret <4 x i8> %T
}
; Truncate a constant <4 x i32> vector to <4 x i16> (should fold at compile time).
define<4 x i16> @const_16_32() {
  %G = trunc <4 x i32> <i32 0, i32 3, i32 8, i32 7> to <4 x i16>
  ret <4 x i16> %G
}
; Truncate a constant <4 x i64> vector to <4 x i16> (should fold at compile time).
define<4 x i16> @const_16_64() {
  %G = trunc <4 x i64> <i64 0, i64 3, i64 8, i64 7> to <4 x i16>
  ret <4 x i16> %G
}
71 define void @bugOnTruncBitwidthReduce() nounwind {
73 %0 = xor <4 x i64> zeroinitializer, zeroinitializer
74 %1 = trunc <4 x i64> %0 to <4 x i32>
75 %2 = lshr <4 x i32> %1, <i32 18, i32 18, i32 18, i32 18>
76 %3 = xor <4 x i32> %2, %1