; RUN: llc %s -o - -mtriple=aarch64-unknown -mattr=+fuse-aes,+crypto | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=generic -mattr=+crypto | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a53 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a65 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a73 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a76 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a77 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a78 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a78c | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-x1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-e1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-n1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-n2 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=neoverse-v1 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m3 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m4 | FileCheck %s
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m5 | FileCheck %s
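
; Every RUN line above enables AES instruction fusion, either through an
; explicit -mattr=+fuse-aes or because the CPU model implies the feature. With
; fusion enabled, the machine scheduler should keep each AES round instruction
; immediately ahead of the MixColumns instruction that consumes its result
; (aese/aesmc when encrypting, aesd/aesimc when decrypting) so the core can
; macro-fuse the pair; the CHECK-NEXT lines below verify that adjacency.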
declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %d)
declare <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d, <16 x i8> %k)
declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %d)
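
; These intrinsics select directly to the corresponding Crypto instructions:
; aese/aesd are the AES single-round encrypt/decrypt instructions, and
; aesmc/aesimc perform the (inverse) MixColumns transform.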
define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
  %d0 = load <16 x i8>, <16 x i8>* %a0
  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
  %d1 = load <16 x i8>, <16 x i8>* %a1
  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
  %d2 = load <16 x i8>, <16 x i8>* %a2
  %a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
  %d3 = load <16 x i8>, <16 x i8>* %a3
  %k0 = load <16 x i8>, <16 x i8>* %b0
  %e00 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d0, <16 x i8> %k0)
  %f00 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
  %e01 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d1, <16 x i8> %k0)
  %f01 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e01)
  %e02 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d2, <16 x i8> %k0)
  %f02 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
  %e03 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d3, <16 x i8> %k0)
  %f03 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
  %b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
  %k1 = load <16 x i8>, <16 x i8>* %b1
  %e10 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f00, <16 x i8> %k1)
  %f10 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e00)
  %e11 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f01, <16 x i8> %k1)
  %f11 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e01)
  %e12 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f02, <16 x i8> %k1)
  %f12 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e02)
  %e13 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f03, <16 x i8> %k1)
  %f13 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e03)
  %b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
  %k2 = load <16 x i8>, <16 x i8>* %b2
  %e20 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f10, <16 x i8> %k2)
  %f20 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e10)
  %e21 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f11, <16 x i8> %k2)
  %f21 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e11)
  %e22 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f12, <16 x i8> %k2)
  %f22 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e12)
  %e23 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f13, <16 x i8> %k2)
  %f23 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e13)
  %b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
  %k3 = load <16 x i8>, <16 x i8>* %b3
  %e30 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f20, <16 x i8> %k3)
  %f30 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e20)
  %e31 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f21, <16 x i8> %k3)
  %f31 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e21)
  %e32 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f22, <16 x i8> %k3)
  %f32 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e22)
  %e33 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f23, <16 x i8> %k3)
  %f33 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %e23)
  %g0 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f30, <16 x i8> %d)
  %h0 = xor <16 x i8> %g0, %e
  %g1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f31, <16 x i8> %d)
  %h1 = xor <16 x i8> %g1, %e
  %g2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f32, <16 x i8> %d)
  %h2 = xor <16 x i8> %g2, %e
  %g3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %f33, <16 x i8> %d)
  %h3 = xor <16 x i8> %g3, %e
  store <16 x i8> %h0, <16 x i8>* %c0
  %c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
  store <16 x i8> %h1, <16 x i8>* %c1
  %c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
  store <16 x i8> %h2, <16 x i8>* %c2
  %c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
  store <16 x i8> %h3, <16 x i8>* %c3
  ret void

; CHECK-LABEL: aesea:
; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECK: aesmc [[VA]], [[VA]]
; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VB]], [[VB]]
; CHECK: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VC]], [[VC]]
; CHECK: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VD]], [[VD]]
; CHECK: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VE]], [[VE]]
; CHECK: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VF]], [[VF]]
; CHECK: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VG]], [[VG]]
; CHECK: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VH]], [[VH]]
}
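
; The decryption kernel below mirrors aesea: each aesd should stay adjacent to
; the aesimc that consumes its result.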
define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
  %d0 = load <16 x i8>, <16 x i8>* %a0
  %a1 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 1
  %d1 = load <16 x i8>, <16 x i8>* %a1
  %a2 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 2
  %d2 = load <16 x i8>, <16 x i8>* %a2
  %a3 = getelementptr inbounds <16 x i8>, <16 x i8>* %a0, i64 3
  %d3 = load <16 x i8>, <16 x i8>* %a3
  %k0 = load <16 x i8>, <16 x i8>* %b0
  %e00 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d0, <16 x i8> %k0)
  %f00 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
  %e01 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d1, <16 x i8> %k0)
  %f01 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e01)
  %e02 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d2, <16 x i8> %k0)
  %f02 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
  %e03 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %d3, <16 x i8> %k0)
  %f03 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
  %b1 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 1
  %k1 = load <16 x i8>, <16 x i8>* %b1
  %e10 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f00, <16 x i8> %k1)
  %f10 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e00)
  %e11 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f01, <16 x i8> %k1)
  %f11 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e01)
  %e12 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f02, <16 x i8> %k1)
  %f12 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e02)
  %e13 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f03, <16 x i8> %k1)
  %f13 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e03)
  %b2 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 2
  %k2 = load <16 x i8>, <16 x i8>* %b2
  %e20 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f10, <16 x i8> %k2)
  %f20 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e10)
  %e21 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f11, <16 x i8> %k2)
  %f21 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e11)
  %e22 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f12, <16 x i8> %k2)
  %f22 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e12)
  %e23 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f13, <16 x i8> %k2)
  %f23 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e13)
  %b3 = getelementptr inbounds <16 x i8>, <16 x i8>* %b0, i64 3
  %k3 = load <16 x i8>, <16 x i8>* %b3
  %e30 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f20, <16 x i8> %k3)
  %f30 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e20)
  %e31 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f21, <16 x i8> %k3)
  %f31 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e21)
  %e32 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f22, <16 x i8> %k3)
  %f32 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e22)
  %e33 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f23, <16 x i8> %k3)
  %f33 = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %e23)
  %g0 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f30, <16 x i8> %d)
  %h0 = xor <16 x i8> %g0, %e
  %g1 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f31, <16 x i8> %d)
  %h1 = xor <16 x i8> %g1, %e
  %g2 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f32, <16 x i8> %d)
  %h2 = xor <16 x i8> %g2, %e
  %g3 = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %f33, <16 x i8> %d)
  %h3 = xor <16 x i8> %g3, %e
  store <16 x i8> %h0, <16 x i8>* %c0
  %c1 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 1
  store <16 x i8> %h1, <16 x i8>* %c1
  %c2 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 2
  store <16 x i8> %h2, <16 x i8>* %c2
  %c3 = getelementptr inbounds <16 x i8>, <16 x i8>* %c0, i64 3
  store <16 x i8> %h3, <16 x i8>* %c3
  ret void

; CHECK-LABEL: aesda:
; CHECK: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
; CHECK: aesimc [[VA]], [[VA]]
; CHECK: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VB]], [[VB]]
; CHECK: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VC]], [[VC]]
; CHECK: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VD]], [[VD]]
; CHECK: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VE]], [[VE]]
; CHECK: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VF]], [[VF]]
; CHECK: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VG]], [[VG]]
; CHECK: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesimc [[VH]], [[VH]]
}
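
; Unlike the two kernels above, this function interleaves independent loads
; and stores with a short aese/aesmc chain, so the scheduler has other
; instructions it can place around the fused pairs.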
define void @aes_load_store(<16 x i8>* %p1, <16 x i8>* %p2, <16 x i8>* %p3) {
entry:
  %x1 = alloca <16 x i8>, align 16
  %x2 = alloca <16 x i8>, align 16
  %x3 = alloca <16 x i8>, align 16
  %x4 = alloca <16 x i8>, align 16
  %x5 = alloca <16 x i8>, align 16
  %in1 = load <16 x i8>, <16 x i8>* %p1, align 16
  store <16 x i8> %in1, <16 x i8>* %x1, align 16
  %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1)
  %in2 = load <16 x i8>, <16 x i8>* %p2, align 16
  %aesmc1 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1)
  %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2)
  store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
  %in3 = load <16 x i8>, <16 x i8>* %p3, align 16
  %aesmc2 = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2)
  %aese3 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %aesmc2, <16 x i8> %in3)
  store <16 x i8> %aese3, <16 x i8>* %x5, align 16
  ret void
; CHECK-LABEL: aes_load_store:
; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
; aese and aesmc are described as sharing a functional unit, so they will not
; be scheduled in the same cycle and the scheduler may place another
; instruction between this first pair (hence CHECK rather than CHECK-NEXT).
; CHECK: aesmc [[VA]], [[VA]]
; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECK-NEXT: aesmc [[VB]], [[VB]]
}