[LLVM][IR] Use splat syntax when printing ConstantExpr based splats. (#116856)
clang/test/CodeGen/X86/bfloat16.cpp
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-feature +fullbf16 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-NBF16 %s
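//
// Scalar __bf16 arithmetic: with -target-feature +fullbf16 the operations are
// emitted directly on bfloat (CHECK); without it the operands are promoted to
// float and the result is truncated back to bfloat (CHECK-NBF16).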
// CHECK-LABEL: define dso_local void @_Z11test_scalarDF16bDF16b
// CHECK-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK: [[A_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: [[C:%.*]] = alloca bfloat, align 2
// CHECK-NEXT: store bfloat [[A]], ptr [[A_ADDR]], align 2
// CHECK-NEXT: store bfloat [[B]], ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[ADD:%.*]] = fadd bfloat [[TMP0]], [[TMP1]]
// CHECK-NEXT: store bfloat [[ADD]], ptr [[C]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP3:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[SUB:%.*]] = fsub bfloat [[TMP2]], [[TMP3]]
// CHECK-NEXT: store bfloat [[SUB]], ptr [[C]], align 2
// CHECK-NEXT: [[TMP4:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP5:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[MUL:%.*]] = fmul bfloat [[TMP4]], [[TMP5]]
// CHECK-NEXT: store bfloat [[MUL]], ptr [[C]], align 2
// CHECK-NEXT: [[TMP6:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NEXT: [[TMP7:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NEXT: [[DIV:%.*]] = fdiv bfloat [[TMP6]], [[TMP7]]
// CHECK-NEXT: store bfloat [[DIV]], ptr [[C]], align 2
// CHECK-NEXT: ret void
//
// CHECK-NBF16-LABEL: define dso_local void @_Z11test_scalarDF16bDF16b
// CHECK-NBF16-SAME: (bfloat noundef [[A:%.*]], bfloat noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NBF16: [[A_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NBF16-NEXT: [[B_ADDR:%.*]] = alloca bfloat, align 2
// CHECK-NBF16-NEXT: [[C:%.*]] = alloca bfloat, align 2
// CHECK-NBF16-NEXT: store bfloat [[A]], ptr [[A_ADDR]], align 2
// CHECK-NBF16-NEXT: store bfloat [[B]], ptr [[B_ADDR]], align 2
// CHECK-NBF16-NEXT: [[TMP0:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT:%.*]] = fpext bfloat [[TMP0]] to float
// CHECK-NBF16-NEXT: [[TMP1:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT1:%.*]] = fpext bfloat [[TMP1]] to float
// CHECK-NBF16-NEXT: [[ADD:%.*]] = fadd float [[EXT]], [[EXT1]]
// CHECK-NBF16-NEXT: [[UNPROMOTION:%.*]] = fptrunc float [[ADD]] to bfloat
// CHECK-NBF16-NEXT: store bfloat [[UNPROMOTION]], ptr [[C]], align 2
// CHECK-NBF16-NEXT: [[TMP2:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT2:%.*]] = fpext bfloat [[TMP2]] to float
// CHECK-NBF16-NEXT: [[TMP3:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT3:%.*]] = fpext bfloat [[TMP3]] to float
// CHECK-NBF16-NEXT: [[SUB:%.*]] = fsub float [[EXT2]], [[EXT3]]
// CHECK-NBF16-NEXT: [[UNPROMOTION4:%.*]] = fptrunc float [[SUB]] to bfloat
// CHECK-NBF16-NEXT: store bfloat [[UNPROMOTION4]], ptr [[C]], align 2
// CHECK-NBF16-NEXT: [[TMP4:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT5:%.*]] = fpext bfloat [[TMP4]] to float
// CHECK-NBF16-NEXT: [[TMP5:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT6:%.*]] = fpext bfloat [[TMP5]] to float
// CHECK-NBF16-NEXT: [[MUL:%.*]] = fmul float [[EXT5]], [[EXT6]]
// CHECK-NBF16-NEXT: [[UNPROMOTION7:%.*]] = fptrunc float [[MUL]] to bfloat
// CHECK-NBF16-NEXT: store bfloat [[UNPROMOTION7]], ptr [[C]], align 2
// CHECK-NBF16-NEXT: [[TMP6:%.*]] = load bfloat, ptr [[A_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT8:%.*]] = fpext bfloat [[TMP6]] to float
// CHECK-NBF16-NEXT: [[TMP7:%.*]] = load bfloat, ptr [[B_ADDR]], align 2
// CHECK-NBF16-NEXT: [[EXT9:%.*]] = fpext bfloat [[TMP7]] to float
// CHECK-NBF16-NEXT: [[DIV:%.*]] = fdiv float [[EXT8]], [[EXT9]]
// CHECK-NBF16-NEXT: [[UNPROMOTION10:%.*]] = fptrunc float [[DIV]] to bfloat
// CHECK-NBF16-NEXT: store bfloat [[UNPROMOTION10]], ptr [[C]], align 2
// CHECK-NBF16-NEXT: ret void
//
void test_scalar(__bf16 a, __bf16 b) {
  __bf16 c;
  c = a + b;
  c = a - b;
  c = a * b;
  c = a / b;
}

typedef __bf16 v8bfloat16 __attribute__((__vector_size__(16)));
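//
// Vector case: the same four operations on a 16-byte vector of __bf16. With
// +fullbf16 the arithmetic stays in <8 x bfloat> (CHECK); otherwise it is
// widened to <8 x float> and truncated back to <8 x bfloat> (CHECK-NBF16).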
// CHECK-LABEL: define dso_local void @_Z11test_vectorDv8_DF16bS_
// CHECK-SAME: (<8 x bfloat> noundef [[A:%.*]], <8 x bfloat> noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK: [[A_ADDR:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NEXT: [[C:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NEXT: store <8 x bfloat> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NEXT: store <8 x bfloat> [[B]], ptr [[B_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP1:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NEXT: [[ADD:%.*]] = fadd <8 x bfloat> [[TMP0]], [[TMP1]]
// CHECK-NEXT: store <8 x bfloat> [[ADD]], ptr [[C]], align 16
// CHECK-NEXT: [[TMP2:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP3:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NEXT: [[SUB:%.*]] = fsub <8 x bfloat> [[TMP2]], [[TMP3]]
// CHECK-NEXT: store <8 x bfloat> [[SUB]], ptr [[C]], align 16
// CHECK-NEXT: [[TMP4:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NEXT: [[MUL:%.*]] = fmul <8 x bfloat> [[TMP4]], [[TMP5]]
// CHECK-NEXT: store <8 x bfloat> [[MUL]], ptr [[C]], align 16
// CHECK-NEXT: [[TMP6:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NEXT: [[TMP7:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x bfloat> [[TMP6]], [[TMP7]]
// CHECK-NEXT: store <8 x bfloat> [[DIV]], ptr [[C]], align 16
// CHECK-NEXT: ret void
//
// CHECK-NBF16-LABEL: define dso_local void @_Z11test_vectorDv8_DF16bS_
// CHECK-NBF16-SAME: (<8 x bfloat> noundef [[A:%.*]], <8 x bfloat> noundef [[B:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NBF16: [[A_ADDR:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NBF16-NEXT: [[B_ADDR:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NBF16-NEXT: [[C:%.*]] = alloca <8 x bfloat>, align 16
// CHECK-NBF16-NEXT: store <8 x bfloat> [[A]], ptr [[A_ADDR]], align 16
// CHECK-NBF16-NEXT: store <8 x bfloat> [[B]], ptr [[B_ADDR]], align 16
// CHECK-NBF16-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT:%.*]] = fpext <8 x bfloat> [[TMP0]] to <8 x float>
// CHECK-NBF16-NEXT: [[TMP1:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT1:%.*]] = fpext <8 x bfloat> [[TMP1]] to <8 x float>
// CHECK-NBF16-NEXT: [[ADD:%.*]] = fadd <8 x float> [[EXT]], [[EXT1]]
// CHECK-NBF16-NEXT: [[UNPROMOTION:%.*]] = fptrunc <8 x float> [[ADD]] to <8 x bfloat>
// CHECK-NBF16-NEXT: store <8 x bfloat> [[UNPROMOTION]], ptr [[C]], align 16
// CHECK-NBF16-NEXT: [[TMP2:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT2:%.*]] = fpext <8 x bfloat> [[TMP2]] to <8 x float>
// CHECK-NBF16-NEXT: [[TMP3:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT3:%.*]] = fpext <8 x bfloat> [[TMP3]] to <8 x float>
// CHECK-NBF16-NEXT: [[SUB:%.*]] = fsub <8 x float> [[EXT2]], [[EXT3]]
// CHECK-NBF16-NEXT: [[UNPROMOTION4:%.*]] = fptrunc <8 x float> [[SUB]] to <8 x bfloat>
// CHECK-NBF16-NEXT: store <8 x bfloat> [[UNPROMOTION4]], ptr [[C]], align 16
// CHECK-NBF16-NEXT: [[TMP4:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT5:%.*]] = fpext <8 x bfloat> [[TMP4]] to <8 x float>
// CHECK-NBF16-NEXT: [[TMP5:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT6:%.*]] = fpext <8 x bfloat> [[TMP5]] to <8 x float>
// CHECK-NBF16-NEXT: [[MUL:%.*]] = fmul <8 x float> [[EXT5]], [[EXT6]]
// CHECK-NBF16-NEXT: [[UNPROMOTION7:%.*]] = fptrunc <8 x float> [[MUL]] to <8 x bfloat>
// CHECK-NBF16-NEXT: store <8 x bfloat> [[UNPROMOTION7]], ptr [[C]], align 16
// CHECK-NBF16-NEXT: [[TMP6:%.*]] = load <8 x bfloat>, ptr [[A_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT8:%.*]] = fpext <8 x bfloat> [[TMP6]] to <8 x float>
// CHECK-NBF16-NEXT: [[TMP7:%.*]] = load <8 x bfloat>, ptr [[B_ADDR]], align 16
// CHECK-NBF16-NEXT: [[EXT9:%.*]] = fpext <8 x bfloat> [[TMP7]] to <8 x float>
// CHECK-NBF16-NEXT: [[DIV:%.*]] = fdiv <8 x float> [[EXT8]], [[EXT9]]
// CHECK-NBF16-NEXT: [[UNPROMOTION10:%.*]] = fptrunc <8 x float> [[DIV]] to <8 x bfloat>
// CHECK-NBF16-NEXT: store <8 x bfloat> [[UNPROMOTION10]], ptr [[C]], align 16
// CHECK-NBF16-NEXT: ret void
//
void test_vector(v8bfloat16 a, v8bfloat16 b) {
  v8bfloat16 c;
  c = a + b;
  c = a - b;
  c = a * b;
  c = a / b;
}