// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
// RUN: -target-feature +sha3 -emit-llvm -o - %s \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>
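
// The intrinsics in this file map to the instructions of the Arm SHA3 and
// SHA512 crypto extensions (in LLVM, the +sha3 target feature enables both
// groups). vsha512hq_u64 lowers to SHA512H, the SHA-512 hash update (part 1).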
// CHECK-LABEL: @test_vsha512h(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h
void test_vsha512h(uint64x2_t hash_ed, uint64x2_t hash_gf, uint64x2_t kwh_kwh2) {
  uint64x2_t result = vsha512hq_u64(hash_ed, hash_gf, kwh_kwh2);
}
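
// vsha512h2q_u64 lowers to SHA512H2, the SHA-512 hash update (part 2).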
// CHECK-LABEL: @test_vsha512h2(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h2
void test_vsha512h2(uint64x2_t sum_ab, uint64x2_t hash_c_, uint64x2_t hash_ab) {
  uint64x2_t result = vsha512h2q_u64(sum_ab, hash_c_, hash_ab);
}
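
// vsha512su0q_u64 lowers to SHA512SU0, the SHA-512 message schedule update
// (part 1).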
// CHECK-LABEL: @test_vsha512su0(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su0
void test_vsha512su0(uint64x2_t w0_1, uint64x2_t w2_) {
  uint64x2_t result = vsha512su0q_u64(w0_1, w2_);
}
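
// vsha512su1q_u64 lowers to SHA512SU1, the SHA-512 message schedule update
// (part 2).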
// CHECK-LABEL: @test_vsha512su1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su1
void test_vsha512su1(uint64x2_t s01_s02, uint64x2_t w14_15, uint64x2_t w9_10) {
  uint64x2_t result = vsha512su1q_u64(s01_s02, w14_15, w9_10);
}
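
// vrax1q_u64 lowers to RAX1, which computes a EOR ROL(b, 1) in each 64-bit
// lane (a Keccak/SHA-3 helper operation).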
// CHECK-LABEL: @test_vrax1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.rax1
void test_vrax1(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vrax1q_u64(a, b);
}
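
// vxarq_u64 lowers to XAR, which computes ROR(a EOR b, imm) in each 64-bit
// lane; the rotate amount (10 here) must be a compile-time constant.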
// CHECK-LABEL: @test_xar(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.xar
void test_xar(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vxarq_u64(a, b, 10);
}
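
// The vbcaxq_* family lowers to BCAX (bit clear and XOR), which computes
// a EOR (b AND NOT c) bitwise; the tests below cover every 128-bit vector
// element type.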
// CHECK-LABEL: @test_vbcax_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxu.v16i8
void test_vbcax_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = vbcaxq_u8(a, b, c);
}

// CHECK-LABEL: @test_vbcax_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxu.v8i16
void test_vbcax_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = vbcaxq_u16(a, b, c);
}

// CHECK-LABEL: @test_vbcax_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxu.v4i32
void test_vbcax_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = vbcaxq_u32(a, b, c);
}

// CHECK-LABEL: @test_vbcax_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxu.v2i64
void test_vbcax_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = vbcaxq_u64(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxs.v16i8
void test_vbcax_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = vbcaxq_s8(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxs.v8i16
void test_vbcax_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = vbcaxq_s16(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxs.v4i32
void test_vbcax_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = vbcaxq_s32(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxs.v2i64
void test_vbcax_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = vbcaxq_s64(a, b, c);
}
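
// The veor3q_* family lowers to EOR3, a three-way XOR computing a EOR b EOR c,
// again across every 128-bit vector element type.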
// CHECK-LABEL: @test_veor3_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3u.v16i8
void test_veor3_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = veor3q_u8(a, b, c);
}

// CHECK-LABEL: @test_veor3_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3u.v8i16
void test_veor3_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = veor3q_u16(a, b, c);
}

// CHECK-LABEL: @test_veor3_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3u.v4i32
void test_veor3_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = veor3q_u32(a, b, c);
}

// CHECK-LABEL: @test_veor3_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3u.v2i64
void test_veor3_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = veor3q_u64(a, b, c);
}

// CHECK-LABEL: @test_veor3_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3s.v16i8
void test_veor3_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = veor3q_s8(a, b, c);
}

// CHECK-LABEL: @test_veor3_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3s.v8i16
void test_veor3_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = veor3q_s16(a, b, c);
}

// CHECK-LABEL: @test_veor3_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3s.v4i32
void test_veor3_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = veor3q_s32(a, b, c);
}

// CHECK-LABEL: @test_veor3_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3s.v2i64
void test_veor3_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = veor3q_s64(a, b, c);
}