; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a65 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a76 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a77 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a78 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a78c | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-x1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-e1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-n1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-n2 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-v1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-512tvb | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m3 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m4 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m5 | FileCheck %s
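
; Every RUN line above is expected to end up with the fuse-aes subtarget
; feature enabled, either explicitly via -mattr or implied by the selected
; CPU, so all of them should produce the same fused aese/aesmc (and
; aesd/aesimc) schedules checked below.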
declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
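
; Four independent cipher streams unrolled across four round keys. Each aese
; result feeds exactly one aesmc, giving the scheduler sixteen fuseable
; aese/aesmc pairs to keep adjacent.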
define void @aesea(ptr %a0, ptr %b0, ptr %c0, <16 x i8> %d, <16 x i8> %e) {
  %d0 = load <16 x i8>, ptr %a0
  %a1 = getelementptr inbounds <16 x i8>, ptr %a0, i64 1
  %d1 = load <16 x i8>, ptr %a1
  %a2 = getelementptr inbounds <16 x i8>, ptr %a0, i64 2
  %d2 = load <16 x i8>, ptr %a2
  %a3 = getelementptr inbounds <16 x i8>, ptr %a0, i64 3
  %d3 = load <16 x i8>, ptr %a3
  %k0 = load <16 x i8>, ptr %b0
  %e00 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d0, <16 x i8> %k0)
  %f00 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
  %e01 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d1, <16 x i8> %k0)
  %f01 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e01)
  %e02 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d2, <16 x i8> %k0)
  %f02 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
  %e03 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d3, <16 x i8> %k0)
  %f03 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
  %b1 = getelementptr inbounds <16 x i8>, ptr %b0, i64 1
  %k1 = load <16 x i8>, ptr %b1
  %e10 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f00, <16 x i8> %k1)
  %f10 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e10)
  %e11 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f01, <16 x i8> %k1)
  %f11 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e11)
  %e12 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f02, <16 x i8> %k1)
  %f12 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e12)
  %e13 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f03, <16 x i8> %k1)
  %f13 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e13)
  %b2 = getelementptr inbounds <16 x i8>, ptr %b0, i64 2
  %k2 = load <16 x i8>, ptr %b2
  %e20 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f10, <16 x i8> %k2)
  %f20 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e20)
  %e21 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f11, <16 x i8> %k2)
  %f21 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e21)
  %e22 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f12, <16 x i8> %k2)
  %f22 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e22)
  %e23 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f13, <16 x i8> %k2)
  %f23 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e23)
  %b3 = getelementptr inbounds <16 x i8>, ptr %b0, i64 3
  %k3 = load <16 x i8>, ptr %b3
  %e30 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f20, <16 x i8> %k3)
  %f30 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e30)
  %e31 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f21, <16 x i8> %k3)
  %f31 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e31)
  %e32 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f22, <16 x i8> %k3)
  %f32 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e32)
  %e33 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f23, <16 x i8> %k3)
  %f33 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e33)
  %g0 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f30, <16 x i8> %d)
  %h0 = xor <16 x i8> %g0, %e
  %g1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f31, <16 x i8> %d)
  %h1 = xor <16 x i8> %g1, %e
  %g2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f32, <16 x i8> %d)
  %h2 = xor <16 x i8> %g2, %e
  %g3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f33, <16 x i8> %d)
  %h3 = xor <16 x i8> %g3, %e
  store <16 x i8> %h0, ptr %c0
  %c1 = getelementptr inbounds <16 x i8>, ptr %c0, i64 1
  store <16 x i8> %h1, ptr %c1
  %c2 = getelementptr inbounds <16 x i8>, ptr %c0, i64 2
  store <16 x i8> %h2, ptr %c2
  %c3 = getelementptr inbounds <16 x i8>, ptr %c0, i64 3
  store <16 x i8> %h3, ptr %c3
  ret void

; CHECK-LABEL: aesea:
; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECK: aesmc [[VA]], [[VA]]
; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VB]], [[VB]]
; CHECK: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VC]], [[VC]]
; CHECK: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VD]], [[VD]]
; CHECK: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VE]], [[VE]]
; CHECK: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VF]], [[VF]]
; CHECK: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VG]], [[VG]]
; CHECK: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VH]], [[VH]]
}
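
; The decryption path mirrors @aesea: each aesd result feeds the aesimc that
; follows it, and the aesd/aesimc pairs are expected to fuse the same way.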
define void @aesda(ptr %a0, ptr %b0, ptr %c0, <16 x i8> %d, <16 x i8> %e) {
  %d0 = load <16 x i8>, ptr %a0
  %a1 = getelementptr inbounds <16 x i8>, ptr %a0, i64 1
  %d1 = load <16 x i8>, ptr %a1
  %a2 = getelementptr inbounds <16 x i8>, ptr %a0, i64 2
  %d2 = load <16 x i8>, ptr %a2
  %a3 = getelementptr inbounds <16 x i8>, ptr %a0, i64 3
  %d3 = load <16 x i8>, ptr %a3
  %k0 = load <16 x i8>, ptr %b0
  %e00 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d0, <16 x i8> %k0)
  %f00 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
  %e01 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d1, <16 x i8> %k0)
  %f01 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e01)
  %e02 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d2, <16 x i8> %k0)
  %f02 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
  %e03 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d3, <16 x i8> %k0)
  %f03 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
  %b1 = getelementptr inbounds <16 x i8>, ptr %b0, i64 1
  %k1 = load <16 x i8>, ptr %b1
  %e10 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f00, <16 x i8> %k1)
  %f10 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e10)
  %e11 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f01, <16 x i8> %k1)
  %f11 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e11)
  %e12 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f02, <16 x i8> %k1)
  %f12 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e12)
  %e13 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f03, <16 x i8> %k1)
  %f13 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e13)
  %b2 = getelementptr inbounds <16 x i8>, ptr %b0, i64 2
  %k2 = load <16 x i8>, ptr %b2
  %e20 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f10, <16 x i8> %k2)
  %f20 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e20)
  %e21 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f11, <16 x i8> %k2)
  %f21 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e21)
  %e22 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f12, <16 x i8> %k2)
  %f22 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e22)
  %e23 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f13, <16 x i8> %k2)
  %f23 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e23)
  %b3 = getelementptr inbounds <16 x i8>, ptr %b0, i64 3
  %k3 = load <16 x i8>, ptr %b3
  %e30 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f20, <16 x i8> %k3)
  %f30 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e30)
  %e31 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f21, <16 x i8> %k3)
  %f31 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e31)
  %e32 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f22, <16 x i8> %k3)
  %f32 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e32)
  %e33 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f23, <16 x i8> %k3)
  %f33 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e33)
  %g0 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f30, <16 x i8> %d)
  %h0 = xor <16 x i8> %g0, %e
  %g1 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f31, <16 x i8> %d)
  %h1 = xor <16 x i8> %g1, %e
  %g2 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f32, <16 x i8> %d)
  %h2 = xor <16 x i8> %g2, %e
  %g3 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f33, <16 x i8> %d)
  %h3 = xor <16 x i8> %g3, %e
  store <16 x i8> %h0, ptr %c0
  %c1 = getelementptr inbounds <16 x i8>, ptr %c0, i64 1
  store <16 x i8> %h1, ptr %c1
  %c2 = getelementptr inbounds <16 x i8>, ptr %c0, i64 2
  store <16 x i8> %h2, ptr %c2
  %c3 = getelementptr inbounds <16 x i8>, ptr %c0, i64 3
  store <16 x i8> %h3, ptr %c3
  ret void

; CHECK-LABEL: aesda:
; CHECK: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECK: aesimc [[VA]], [[VA]]
; CHECK: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VB]], [[VB]]
; CHECK: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VC]], [[VC]]
; CHECK: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VD]], [[VD]]
; CHECK: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VE]], [[VE]]
; CHECK: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VF]], [[VF]]
; CHECK: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VG]], [[VG]]
; CHECK: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VH]], [[VH]]
}
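
; Interleave plain loads and stores with a short aese/aesmc chain so that the
; scheduler has other instructions it could place between the AES operations;
; the fused pairs are still expected to stay adjacent.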
define void @aes_load_store(ptr %p1, ptr %p2, ptr %p3) {
  %x1 = alloca <16 x i8>, align 16
  %x2 = alloca <16 x i8>, align 16
  %x3 = alloca <16 x i8>, align 16
  %x4 = alloca <16 x i8>, align 16
  %x5 = alloca <16 x i8>, align 16
  %in1 = load <16 x i8>, ptr %p1, align 16
  store <16 x i8> %in1, ptr %x1, align 16
  %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1)
  %in2 = load <16 x i8>, ptr %p2, align 16
  %aesmc1 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1)
  %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2)
  store <16 x i8> %aesmc1, ptr %x3, align 16
  %in3 = load <16 x i8>, ptr %p3, align 16
  %aesmc2 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2)
  %aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3)
  store <16 x i8> %aese3, ptr %x5, align 16
  ret void

; CHECK-LABEL: aes_load_store:
; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; aese and aesmc are described as sharing an execution unit, so they are not
; scheduled in the same cycle and the scheduler may place another instruction
; between them; hence plain CHECK rather than CHECK-NEXT for this first pair.
; CHECK: aesmc [[VA]], [[VA]]
; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VB]], [[VB]]
}