1 ; RUN: llc < %s -mtriple=x86_64-- -o - | FileCheck %s
3 ; This test verifies that we produce different code for different architectures
4 ; based on target-cpu and target-features attributes.
5 ; In this case avx has a vmovss instruction and otherwise we should be using movss
6 ; to materialize constants.
7 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Returns the constant 4.0. Compiled with attribute group #0
; (x86-64 + avx2, see the attributes block below), so per the header
; comment the constant should be materialized with the AVX encoding
; (vmovss) rather than plain movss.
; NOTE(review): the closing brace and the CHECK lines for this function
; appear to be missing from this excerpt — confirm against the full file.
9 define float @_Z3barv() #0 {
11 ret float 4.000000e+00
; Returns the constant 1.0. Compiled with attribute group #1
; (plain x86-64, no extra features), so per the header comment the
; non-AVX movss form is expected here.
; NOTE(review): closing brace / CHECK lines missing from this excerpt.
17 define float @_Z4testv() #1 {
19 ret float 1.000000e+00
; Returns the constant 4.0. Compiled with attribute group #2
; (corei7 + sse4.2, still no AVX), so per the header comment the
; non-AVX movss form is expected.
; NOTE(review): closing brace / CHECK lines missing from this excerpt.
25 define float @_Z3foov() #2 {
27 ret float 4.000000e+00
; Returns the constant 4.0. Same attribute group #0 (avx2) as @_Z3barv,
; demonstrating that feature selection is per-function and consistent —
; the AVX vmovss form is expected again per the header comment.
; NOTE(review): closing brace / CHECK lines missing from this excerpt.
33 define float @_Z3bazv() #0 {
35 ret float 4.000000e+00
; Spills the argument to a stack slot, reloads it, and feeds it to the
; AES key-generation-assist intrinsic with immediate round constant 4.
; Uses attribute group #3, which enables +aes so the intrinsic can be
; selected (see the CHECK for "aeskeygenassist" below).
; NOTE(review): the ret and closing brace are missing from this excerpt.
41 define <2 x i64> @foo(<2 x i64> %a) #3 {
43 %a.addr = alloca <2 x i64>, align 16
44 store <2 x i64> %a, <2 x i64>* %a.addr, align 16
45 %0 = load <2 x i64>, <2 x i64>* %a.addr, align 16
46 %1 = call <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64> %0, i8 4)
; Declaration of the AESNI key-generation intrinsic used by @foo above.
; The CHECK below verifies that llc actually emits the aeskeygenassist
; instruction, i.e. that +aes from attribute group #3 took effect.
50 ; Function Attrs: nounwind readnone
51 declare <2 x i64> @llvm.x86.aesni.aeskeygenassist(<2 x i64>, i8)
54 ; CHECK: aeskeygenassist
; Accumulates the CRC32 of the byte at *%a into %crc via the SSE4.2
; crc32 intrinsic, post-incrementing the pointer stored in %a.addr.
; Uses attribute group #3 (x86-64 with +avx2,+aes); the crc32 intrinsic
; needs SSE4.2 — presumably implied by +avx2 in the feature hierarchy,
; TODO confirm against X86 feature implications.
; NOTE(review): the ret and closing brace are missing from this excerpt.
56 ; Function Attrs: nounwind uwtable
57 define i32 @bar(i32 %crc, i8* %a) #3 {
59 %crc.addr = alloca i32, align 4
60 %a.addr = alloca i8*, align 8
61 store i32 %crc, i32* %crc.addr, align 4
62 store i8* %a, i8** %a.addr, align 8
63 %0 = load i32, i32* %crc.addr, align 4
64 %1 = load i8*, i8** %a.addr, align 8
65 %incdec.ptr = getelementptr inbounds i8, i8* %1, i32 1
66 store i8* %incdec.ptr, i8** %a.addr, align 8
67 %2 = load i8, i8* %1, align 1
68 %3 = call i32 @llvm.x86.sse42.crc32.32.8(i32 %0, i8 %2)
; Declaration of the SSE4.2 byte-granularity CRC32 intrinsic used by
; @bar above.
72 ; Function Attrs: nounwind readnone
73 declare i32 @llvm.x86.sse42.crc32.32.8(i32, i8)
; Per-function subtarget configurations exercised by the test:
;   #0 - generic x86-64 with AVX2 (AVX encodings available)
;   #1 - generic x86-64, no extra features (baseline SSE only)
;   #2 - corei7 with SSE4.2 (no AVX)
;   #3 - generic x86-64 with AVX2 and AES (for the intrinsics above)
78 attributes #0 = { "target-cpu"="x86-64" "target-features"="+avx2" }
79 attributes #1 = { "target-cpu"="x86-64" }
80 attributes #2 = { "target-cpu"="corei7" "target-features"="+sse4.2" }
81 attributes #3 = { "target-cpu"="x86-64" "target-features"="+avx2,+aes" }