// Source: llvm-project — clang/test/CodeGen/AArch64/targetattr-crypto.c
// (blob 006a394be77753725c19980a56fd592fdc2898f8)
// RUN: %clang_cc1 -triple aarch64 -target-feature +v8a -verify -S %s -o -
// REQUIRES: aarch64-registered-target

#include <arm_neon.h>
6 __attribute__((target("+crypto")))
7 void test_crypto(uint8x16_t data, uint8x16_t key)
9 vaeseq_u8(data, key);
10 vsha1su1q_u32(data, key);
13 __attribute__((target("crypto")))
14 void test_pluscrypto(uint8x16_t data, uint8x16_t key)
16 vaeseq_u8(data, key);
17 vsha1su1q_u32(data, key);
20 __attribute__((target("arch=armv8.2-a+crypto")))
21 void test_archcrypto(uint8x16_t data, uint8x16_t key)
23 vaeseq_u8(data, key);
24 vsha1su1q_u32(data, key);
27 // FIXME: This shouldn't need +crypto to be consistent with -mcpu options.
28 __attribute__((target("cpu=cortex-a55+crypto")))
29 void test_a55crypto(uint8x16_t data, uint8x16_t key)
31 vaeseq_u8(data, key);
32 vsha1su1q_u32(data, key);
35 __attribute__((target("cpu=cortex-a510+crypto")))
36 void test_a510crypto(uint8x16_t data, uint8x16_t key)
38 vaeseq_u8(data, key);
39 vsha1su1q_u32(data, key);
42 __attribute__((target("+sha2+aes")))
43 void test_sha2aes(uint8x16_t data, uint8x16_t key)
45 vaeseq_u8(data, key);
46 vsha1su1q_u32(data, key);
49 void test_errors(uint8x16_t data, uint8x16_t key)
51 vaeseq_u8(data, key); // expected-error {{always_inline function 'vaeseq_u8' requires target feature 'aes'}}
52 vsha1su1q_u32(data, key); // expected-error {{always_inline function 'vsha1su1q_u32' requires target feature 'sha2'}}