; RUN: llc < %s -mtriple=x86_64--
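
; These tests truncate loaded or constant vectors to narrower element types;
; llc should legalize each case without crashing.

; Truncate two <4 x i32> loads to <4 x i16> and add the results.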
define <4 x i16> @func_16_32() {
  %F = load <4 x i32>, <4 x i32>* undef
  %G = trunc <4 x i32> %F to <4 x i16>
  %H = load <4 x i32>, <4 x i32>* undef
  %Y = trunc <4 x i32> %H to <4 x i16>
  %T = add <4 x i16> %Y, %G
  store <4 x i16> %T, <4 x i16>* undef
  ret <4 x i16> %T
}
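
; Truncate two <4 x i64> loads to <4 x i16> and xor the results.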
define <4 x i16> @func_16_64() {
  %F = load <4 x i64>, <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i16>
  %H = load <4 x i64>, <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i16>
  %T = xor <4 x i16> %Y, %G
  store <4 x i16> %T, <4 x i16>* undef
  ret <4 x i16> %T
}
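
; Truncate two <4 x i64> loads to <4 x i32> and or the results.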
define <4 x i32> @func_32_64() {
  %F = load <4 x i64>, <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i32>
  %H = load <4 x i64>, <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i32>
  %T = or <4 x i32> %Y, %G
  ret <4 x i32> %T
}
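
; Truncate two <4 x i16> loads to <4 x i8> and add the results.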
define <4 x i8> @func_8_16() {
  %F = load <4 x i16>, <4 x i16>* undef
  %G = trunc <4 x i16> %F to <4 x i8>
  %H = load <4 x i16>, <4 x i16>* undef
  %Y = trunc <4 x i16> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}
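
; Truncate two <4 x i32> loads to <4 x i8> and subtract the results.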
define <4 x i8> @func_8_32() {
  %F = load <4 x i32>, <4 x i32>* undef
  %G = trunc <4 x i32> %F to <4 x i8>
  %H = load <4 x i32>, <4 x i32>* undef
  %Y = trunc <4 x i32> %H to <4 x i8>
  %T = sub <4 x i8> %Y, %G
  ret <4 x i8> %T
}
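
; Truncate two <4 x i64> loads to <4 x i8> and add the results.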
define <4 x i8> @func_8_64() {
  %F = load <4 x i64>, <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i8>
  %H = load <4 x i64>, <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}
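
; Truncate a constant <4 x i32> vector to <4 x i16>.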
define <4 x i16> @const_16_32() {
  %G = trunc <4 x i32> <i32 0, i32 3, i32 8, i32 7> to <4 x i16>
  ret <4 x i16> %G
}
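
; Truncate a constant <4 x i64> vector to <4 x i16>.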
define <4 x i16> @const_16_64() {
  %G = trunc <4 x i64> <i64 0, i64 3, i64 8, i64 7> to <4 x i16>
  ret <4 x i16> %G
}
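
; Regression test named for a bug in trunc bitwidth reduction; it should
; compile without crashing.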
define void @bugOnTruncBitwidthReduce() nounwind {
entry:
  %0 = xor <4 x i64> zeroinitializer, zeroinitializer
  %1 = trunc <4 x i64> %0 to <4 x i32>
  %2 = lshr <4 x i32> %1, <i32 18, i32 18, i32 18, i32 18>
  %3 = xor <4 x i32> %2, %1
  ret void
}