clang/test/CodeGen/RISCV/riscv-atomics.c
// RUN: %clang_cc1 -triple riscv32 -O1 -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV32I
// RUN: %clang_cc1 -triple riscv32 -target-feature +a -O1 -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV32IA
// RUN: %clang_cc1 -triple riscv64 -O1 -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV64I
// RUN: %clang_cc1 -triple riscv64 -target-feature +a -O1 -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV64IA

// This test demonstrates that MaxAtomicInlineWidth is set appropriately when
// the atomics instruction set extension is enabled.
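// Without the "a" (atomics) extension, every C11 atomic operation below is
// lowered to an __atomic_* library call. With "+a", operations up to the
// native width (32 bits on riscv32, 64 bits on riscv64) are emitted inline.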

#include <stdatomic.h>
#include <stdint.h>

void test_i8_atomics(_Atomic(int8_t) * a, int8_t b) {
  // RV32I: call zeroext i8 @__atomic_load_1
  // RV32I: call void @__atomic_store_1
  // RV32I: call zeroext i8 @__atomic_fetch_add_1
  // RV32IA: load atomic i8, ptr %a seq_cst, align 1
  // RV32IA: store atomic i8 %b, ptr %a seq_cst, align 1
  // RV32IA: atomicrmw add ptr %a, i8 %b seq_cst, align 1
  // RV64I: call zeroext i8 @__atomic_load_1
  // RV64I: call void @__atomic_store_1
  // RV64I: call zeroext i8 @__atomic_fetch_add_1
  // RV64IA: load atomic i8, ptr %a seq_cst, align 1
  // RV64IA: store atomic i8 %b, ptr %a seq_cst, align 1
  // RV64IA: atomicrmw add ptr %a, i8 %b seq_cst, align 1
  __c11_atomic_load(a, memory_order_seq_cst);
  __c11_atomic_store(a, b, memory_order_seq_cst);
  __c11_atomic_fetch_add(a, b, memory_order_seq_cst);
}

void test_i32_atomics(_Atomic(int32_t) * a, int32_t b) {
  // RV32I: call i32 @__atomic_load_4
  // RV32I: call void @__atomic_store_4
  // RV32I: call i32 @__atomic_fetch_add_4
  // RV32IA: load atomic i32, ptr %a seq_cst, align 4
  // RV32IA: store atomic i32 %b, ptr %a seq_cst, align 4
  // RV32IA: atomicrmw add ptr %a, i32 %b seq_cst, align 4
  // RV64I: call signext i32 @__atomic_load_4
  // RV64I: call void @__atomic_store_4
  // RV64I: call signext i32 @__atomic_fetch_add_4
  // RV64IA: load atomic i32, ptr %a seq_cst, align 4
  // RV64IA: store atomic i32 %b, ptr %a seq_cst, align 4
  // RV64IA: atomicrmw add ptr %a, i32 %b seq_cst, align 4
  __c11_atomic_load(a, memory_order_seq_cst);
  __c11_atomic_store(a, b, memory_order_seq_cst);
  __c11_atomic_fetch_add(a, b, memory_order_seq_cst);
}
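
// On riscv32, even with "+a", 64-bit atomics are wider than
// MaxAtomicInlineWidth and therefore still lower to __atomic_* library calls.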
void test_i64_atomics(_Atomic(int64_t) * a, int64_t b) {
  // RV32I: call i64 @__atomic_load_8
  // RV32I: call void @__atomic_store_8
  // RV32I: call i64 @__atomic_fetch_add_8
  // RV32IA: call i64 @__atomic_load_8
  // RV32IA: call void @__atomic_store_8
  // RV32IA: call i64 @__atomic_fetch_add_8
  // RV64I: call i64 @__atomic_load_8
  // RV64I: call void @__atomic_store_8
  // RV64I: call i64 @__atomic_fetch_add_8
  // RV64IA: load atomic i64, ptr %a seq_cst, align 8
  // RV64IA: store atomic i64 %b, ptr %a seq_cst, align 8
  // RV64IA: atomicrmw add ptr %a, i64 %b seq_cst, align 8
  __c11_atomic_load(a, memory_order_seq_cst);
  __c11_atomic_store(a, b, memory_order_seq_cst);
  __c11_atomic_fetch_add(a, b, memory_order_seq_cst);
}