1 ; RUN: llc < %s -march=x86-64 | grep paddw | count 2
2 ; RUN: llc < %s -march=x86-64 | not grep mov
4 ; The two-address pass should ensure that identical code is produced for these functions;
5 ; no extra copy should be generated.
7 define <2 x i64> @test1(<2 x i64> %x, <2 x i64> %y) nounwind {
9 %tmp6 = bitcast <2 x i64> %y to <8 x i16> ; <<8 x i16>> [#uses=1]
10 %tmp8 = bitcast <2 x i64> %x to <8 x i16> ; <<8 x i16>> [#uses=1]
11 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
12 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
16 define <2 x i64> @test2(<2 x i64> %x, <2 x i64> %y) nounwind {
18 %tmp6 = bitcast <2 x i64> %x to <8 x i16> ; <<8 x i16>> [#uses=1]
19 %tmp8 = bitcast <2 x i64> %y to <8 x i16> ; <<8 x i16>> [#uses=1]
20 %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
21 %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
26 ; The coalescer should commute the add to avoid a copy.
27 define <4 x float> @test3(<4 x float> %V) {
29 %tmp8 = shufflevector <4 x float> %V, <4 x float> undef,
30 <4 x i32> < i32 3, i32 2, i32 1, i32 0 >
31 %add = fadd <4 x float> %tmp8, %V