Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / X86 / x86-GCC-inline-asm-Y-constraints.c
blob92313b00fa5ddf7f0068abeae7e839b822a6a720
// RUN: %clang_cc1 -ffreestanding -triple=x86_64-apple-darwin -target-cpu skx %s -emit-llvm -o - | FileCheck %s
#include <xmmintrin.h>
// This test is complemented by the .ll test under llvm/test/MC/X86/.
// At this level we can only check if the constraints are passed correctly
// from inline asm to llvm IR.
// Checks that the "Ym" inline-asm constraint survives IR generation:
// the emitted call-asm constraint string must contain "=^Ym".
// CHECK-LABEL: @f_Ym
void f_Ym(__m64 m)
{
  // CHECK: movq $0, %mm1
  // CHECK-SAME: "=^Ym,~{dirflag},~{fpsr},~{flags}"
  __asm__ volatile ("movq %0, %%mm1\n\t"
          :"=Ym" (m));
}
// Checks that the "Yi" constraint is passed through to IR as "^Yi"
// for both the output and the two inputs of the asm.
// CHECK-LABEL: f_Yi
void f_Yi(__m128 x, __m128 y, __m128 z)
{
  // CHECK: vpaddq
  // CHECK-SAME: "=^Yi,^Yi,^Yi,~{dirflag},~{fpsr},~{flags}"
  __asm__ volatile ("vpaddq %0, %1, %2\n\t"
          :"=Yi" (x)
          :"Yi" (y),"Yi"(z));
}
// Checks that the "Yt" constraint is passed through to IR as "^Yt"
// for the output and both inputs.
// CHECK-LABEL: f_Yt
void f_Yt(__m128 x, __m128 y, __m128 z)
{
  // CHECK: vpaddq
  // CHECK-SAME: "=^Yt,^Yt,^Yt,~{dirflag},~{fpsr},~{flags}"
  __asm__ volatile ("vpaddq %0, %1, %2\n\t"
          :"=Yt" (x)
          :"Yt" (y),"Yt"(z));
}
// Checks that the "Y2" constraint is passed through to IR as "^Y2"
// for the output and both inputs.
// CHECK-LABEL: f_Y2
void f_Y2(__m128 x, __m128 y, __m128 z)
{
  // CHECK: vpaddq
  // CHECK-SAME: "=^Y2,^Y2,^Y2,~{dirflag},~{fpsr},~{flags}"
  __asm__ volatile ("vpaddq %0, %1, %2\n\t"
          :"=Y2" (x)
          :"Y2" (y),"Y2"(z));
}
// Checks the "Yz" constraint together with a read-write "+Yi" operand:
// the IR constraint string must carry "=^Yi,=^Yz,^Yi,0" (the "0" ties the
// read-write operand's input to output 0). Closing brace was dropped by the
// scrape and is restored here.
// CHECK-LABEL: f_Yz
void f_Yz(__m128 x, __m128 y, __m128 z)
{
  // CHECK: vpaddq
  // CHECK-SAME: vpaddq
  // CHECK-SAME: "=^Yi,=^Yz,^Yi,0,~{dirflag},~{fpsr},~{flags}"
  __asm__ volatile ("vpaddq %0,%2,%1\n\t"
                    "vpaddq %1,%0,%2\n\t"
          :"+Yi"(z),"=Yz" (x)
          :"Yi" (y) );
}