//===----------------------------------------------------------------------===//
// Vector Instructions
//===----------------------------------------------------------------------===//

// Pseudo instructions for VM/VM512 spill/restore.
//
// These pseudo instructions are used only for spill/restore since
// InlineSpiller assumes that storeRegToStackSlot/loadRegFromStackSlot
// emit a single instruction each.  Those functions therefore emit either a
// single store/load instruction or one of these pseudo store/load
// instructions.
//
// Specifies hasSideEffects = 0 to disable UnmodeledSideEffects.
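//
// As a purely illustrative sketch (the actual expansion is performed later in
// the backend and may differ), a spill through STVMrii could be lowered into
// a sequence that moves each 64-bit part of the mask through a scalar
// register, e.g.:
//
//   svm %s63, %vm1, 0    # read 64-bit part 0 of %vm1
//   st %s63, 0(,%s11)    # store it into the stack slot (offset assumed)
//   ...                  # likewise for parts 1-3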
let mayLoad = 1, hasSideEffects = 0 in {
  def LDVMrii : Pseudo<
      (outs VM:$vmx), (ins MEMrii:$addr),
      "# pseudo ldvm $vmx, $addr", []>;
  def LDVM512rii : Pseudo<
      (outs VM512:$vmx), (ins MEMrii:$addr),
      "# pseudo ldvm512 $vmx, $addr", []>;
}
let mayStore = 1, hasSideEffects = 0 in {
  def STVMrii : Pseudo<
      (outs), (ins MEMrii:$addr, VM:$vmx),
      "# pseudo stvm $addr, $vmx", []>;
  def STVM512rii : Pseudo<
      (outs), (ins MEMrii:$addr, VM512:$vmx),
      "# pseudo stvm512 $addr, $vmx", []>;
}
//===----------------------------------------------------------------------===//
// Pseudo instructions for VM512 modifications
//===----------------------------------------------------------------------===//

// LVM/SVM instructions using VM512
let hasSideEffects = 0, isCodeGenOnly = 1 in {
  let Constraints = "$vx = $vd", DisableEncoding = "$vd" in {
    def LVMyir_y : Pseudo<(outs VM512:$vx), (ins uimm3:$sy, I64:$sz, VM512:$vd),
                          "# pseudo LVM $vx, $sy, $sz, $vd">;
    def LVMyim_y : Pseudo<(outs VM512:$vx),
                          (ins uimm3:$sy, mimm:$sz, VM512:$vd),
                          "# pseudo LVM $vx, $sy, $sz, $vd">;
  }
  def LVMyir : Pseudo<(outs VM512:$vx), (ins uimm3:$sy, I64:$sz),
                      "# pseudo LVM $vx, $sy, $sz">;
  def LVMyim : Pseudo<(outs VM512:$vx), (ins uimm3:$sy, mimm:$sz),
                      "# pseudo LVM $vx, $sy, $sz">;
  def SVMyi : Pseudo<(outs I64:$sx), (ins VM512:$vz, uimm3:$sy),
                     "# pseudo SVM $sx, $vz, $sy">;
}
// VFMK/VFMKW/VFMKS instructions using VM512
let hasSideEffects = 0, isCodeGenOnly = 1, DisableEncoding = "$vl" in {
  def VFMKyal : Pseudo<(outs VM512:$vmx), (ins I32:$vl),
                       "# pseudo-vfmk.at $vmx">;
  def VFMKynal : Pseudo<(outs VM512:$vmx), (ins I32:$vl),
                        "# pseudo-vfmk.af $vmx">;
  def VFMKWyvl : Pseudo<(outs VM512:$vmx),
                        (ins CCOp:$cf, V64:$vz, I32:$vl),
                        "# pseudo-vfmk.w.$cf $vmx, $vz">;
  def VFMKWyvyl : Pseudo<(outs VM512:$vmx),
                         (ins CCOp:$cf, V64:$vz, VM512:$vm, I32:$vl),
                         "# pseudo-vfmk.w.$cf $vmx, $vz, $vm">;
  def VFMKSyvl : Pseudo<(outs VM512:$vmx),
                        (ins CCOp:$cf, V64:$vz, I32:$vl),
                        "# pseudo-vfmk.s.$cf $vmx, $vz">;
  def VFMKSyvyl : Pseudo<(outs VM512:$vmx),
                         (ins CCOp:$cf, V64:$vz, VM512:$vm, I32:$vl),
                         "# pseudo-vfmk.s.$cf $vmx, $vz, $vm">;
}
// ANDM/ORM/XORM/EQVM/NNDM/NEGM instructions using VM512
let hasSideEffects = 0, isCodeGenOnly = 1 in {
  def ANDMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
                      "# andm $vmx, $vmy, $vmz">;
  def ORMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
                     "# orm $vmx, $vmy, $vmz">;
  def XORMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
                      "# xorm $vmx, $vmy, $vmz">;
  def EQVMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
                      "# eqvm $vmx, $vmy, $vmz">;
  def NNDMyy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy, VM512:$vmz),
                      "# nndm $vmx, $vmy, $vmz">;
  def NEGMy : Pseudo<(outs VM512:$vmx), (ins VM512:$vmy),
                     "# negm $vmx, $vmy">;
}
//===----------------------------------------------------------------------===//
// Instructions
//
// Define here all vector instructions described in the SX-Aurora TSUBASA
// Architecture Guide.  For the mnemonics, we use the mnemonics defined in the
// Vector Engine Assembly Language Reference Manual.
//
// Some instructions update only part of the existing data in a register, for
// example when a following instruction rewrites the register with a shorter
// vector length in effect:
//
//   vbrd %v0, 2   # v0 = { 2, 2, 2, ..., 2, 2, 2 }
//   ...           # reduce the vector length
//   vbrd %v0, 3   # v0 = { 3, 3, 3, ..., 3, 2, 2, 2, ..., 2, 2, 2 }
//
// In order to represent the above with virtual registers, we define
// instructions with an additional base register and a `_v` suffix in the
// mnemonic, e.g.:
//
//   vbrd_v tv1, 2, tv0
//
// We also have instructions that use the VL register with a pseudo VL value;
// these carry one of the following suffixes in the mnemonic (see the sketch
// below):
//
//   l: have an additional I32 register to represent the VL value.
//   L: have an additional VL register to represent the VL value.
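//
// For example (an illustrative sketch, not literal definitions from this
// file), a record FOO defined through these multiclasses gets siblings of
// roughly this shape:
//
//   FOO    : ... (ins ...)              // uses the current VL
//   FOOl   : ... (ins ..., I32:$vl)     // VL given as an I32 virtual register
//   FOOL   : ... (ins ..., VLS:$vl)     // VL given as a VLS virtual register
//   FOO_v  : ... (ins ..., RC:$base)    // $vx tied to $base for partial
//                                       //   updates
//
// Here FOO and RC are placeholders, not names defined below.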
//===----------------------------------------------------------------------===//

//-----------------------------------------------------------------------------
// Section 8.9 - Vector Load/Store and Move Instructions
//-----------------------------------------------------------------------------

// Multiclass for VLD instructions
let mayLoad = 1, hasSideEffects = 0, Uses = [VL] in
multiclass VLDbm<string opcStr, bits<8>opc, RegisterClass RC, dag dag_in,
                 string disEnc = ""> {
  let DisableEncoding = disEnc in
  def "" : RVM<opc, (outs RC:$vx), dag_in,
               !strconcat(opcStr, " $vx, $sy, $sz")>;
  let Constraints = "$vx = $base", DisableEncoding = disEnc#"$base",
      isCodeGenOnly = 1 in
  def _v : RVM<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
               !strconcat(opcStr, " $vx, $sy, $sz")>;
}
multiclass VLDlm<string opcStr, bits<8>opc, RegisterClass RC, dag dag_in> {
  defm "" : VLDbm<opcStr, opc, RC, dag_in>;
  let isCodeGenOnly = 1, VE_VLInUse = 1 in {
    defm l : VLDbm<opcStr, opc, RC, !con(dag_in, (ins I32:$vl)), "$vl,">;
    defm L : VLDbm<opcStr, opc, RC, !con(dag_in, (ins VLS:$vl)), "$vl,">;
  }
}
let VE_VLIndex = 3 in
multiclass VLDtgm<string opcStr, bits<8>opc, RegisterClass RC> {
  defm rr : VLDlm<opcStr, opc, RC, (ins I64:$sy, I64:$sz)>;
  let cy = 0 in
  defm ir : VLDlm<opcStr, opc, RC, (ins simm7:$sy, I64:$sz)>;
  let cz = 0 in
  defm rz : VLDlm<opcStr, opc, RC, (ins I64:$sy, zero:$sz)>;
  let cy = 0, cz = 0 in
  defm iz : VLDlm<opcStr, opc, RC, (ins simm7:$sy, zero:$sz)>;
}
multiclass VLDm<string opcStr, bits<8>opc, RegisterClass RC> {
  let vc = 1 in defm "" : VLDtgm<opcStr, opc, RC>;
  let vc = 0 in defm NC : VLDtgm<opcStr#".nc", opc, RC>;
}
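// Note (illustrative): because of the nesting above, a single use such as
// "defm VLD : VLDm<...>" below expands into a family of records whose names
// are built from the suffixes, roughly VLD[NC]{rr,ir,rz,iz}[l|L][_v]
// (e.g. VLDrr, VLDNCir, VLDrrl); this is a naming sketch, not an exhaustive
// list.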
// Section 8.9.1 - VLD (Vector Load)
defm VLD : VLDm<"vld", 0x81, V64>;

// Section 8.9.2 - VLDU (Vector Load Upper)
defm VLDU : VLDm<"vldu", 0x82, V64>;

// Section 8.9.3 - VLDL (Vector Load Lower)
defm VLDLSX : VLDm<"vldl.sx", 0x83, V64>;
let cx = 1 in defm VLDLZX : VLDm<"vldl.zx", 0x83, V64>;

// Section 8.9.4 - VLD2D (Vector Load 2D)
defm VLD2D : VLDm<"vld2d", 0xc1, V64>;

// Section 8.9.5 - VLDU2D (Vector Load Upper 2D)
defm VLDU2D : VLDm<"vldu2d", 0xc2, V64>;

// Section 8.9.6 - VLDL2D (Vector Load Lower 2D)
defm VLDL2DSX : VLDm<"vldl2d.sx", 0xc3, V64>;
let cx = 1 in defm VLDL2DZX : VLDm<"vldl2d.zx", 0xc3, V64>;
// Multiclass for VST instructions
let mayStore = 1, hasSideEffects = 0, Uses = [VL] in
multiclass VSTbm<string opcStr, string argStr, bits<8>opc, dag dag_in> {
  def "" : RVM<opc, (outs), dag_in, !strconcat(opcStr, argStr)>;
  let DisableEncoding = "$vl", isCodeGenOnly = 1, VE_VLInUse = 1 in {
    def l : RVM<opc, (outs), !con(dag_in, (ins I32:$vl)),
                !strconcat(opcStr, argStr)>;
    def L : RVM<opc, (outs), !con(dag_in, (ins VLS:$vl)),
                !strconcat(opcStr, argStr)>;
  }
}
multiclass VSTmm<string opcStr, bits<8>opc, dag dag_in> {
  defm "" : VSTbm<opcStr, " $vx, $sy, $sz", opc, dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : VSTbm<opcStr, " $vx, $sy, $sz, $m", opc, !con(dag_in, (ins VM:$m))>;
}
let VE_VLIndex = 3 in
multiclass VSTtgm<string opcStr, bits<8>opc, RegisterClass RC> {
  defm rrv : VSTmm<opcStr, opc, (ins I64:$sy, I64:$sz, RC:$vx)>;
  let cy = 0 in
  defm irv : VSTmm<opcStr, opc, (ins simm7:$sy, I64:$sz, RC:$vx)>;
  let cz = 0 in
  defm rzv : VSTmm<opcStr, opc, (ins I64:$sy, zero:$sz, RC:$vx)>;
  let cy = 0, cz = 0 in
  defm izv : VSTmm<opcStr, opc, (ins simm7:$sy, zero:$sz, RC:$vx)>;
}
multiclass VSTm<string opcStr, bits<8>opc, RegisterClass RC> {
  let vc = 1, cx = 0 in defm "" : VSTtgm<opcStr, opc, RC>;
  let vc = 0, cx = 0 in defm NC : VSTtgm<opcStr#".nc", opc, RC>;
  let vc = 1, cx = 1 in defm OT : VSTtgm<opcStr#".ot", opc, RC>;
  let vc = 0, cx = 1 in defm NCOT : VSTtgm<opcStr#".nc.ot", opc, RC>;
}
// Section 8.9.7 - VST (Vector Store)
defm VST : VSTm<"vst", 0x91, V64>;

// Section 8.9.8 - VSTU (Vector Store Upper)
defm VSTU : VSTm<"vstu", 0x92, V64>;

// Section 8.9.9 - VSTL (Vector Store Lower)
defm VSTL : VSTm<"vstl", 0x93, V64>;

// Section 8.9.10 - VST2D (Vector Store 2D)
defm VST2D : VSTm<"vst2d", 0xd1, V64>;

// Section 8.9.11 - VSTU2D (Vector Store Upper 2D)
defm VSTU2D : VSTm<"vstu2d", 0xd2, V64>;

// Section 8.9.12 - VSTL2D (Vector Store Lower 2D)
defm VSTL2D : VSTm<"vstl2d", 0xd3, V64>;
// Multiclass for VGT instructions
let mayLoad = 1, hasSideEffects = 0, Uses = [VL] in
multiclass VGTbm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in, string disEnc = ""> {
  let DisableEncoding = disEnc in
  def "" : RVM<opc, (outs RC:$vx), dag_in,
               !strconcat(opcStr, " $vx, ", argStr)>;
  let Constraints = "$vx = $base", DisableEncoding = disEnc#"$base",
      isCodeGenOnly = 1 in
  def _v : RVM<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
               !strconcat(opcStr, " $vx, ", argStr)>;
}
multiclass VGTlm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in> {
  defm "" : VGTbm<opcStr, argStr, opc, RC, dag_in>;
  let isCodeGenOnly = 1, VE_VLInUse = 1 in {
    defm l : VGTbm<opcStr, argStr, opc, RC, !con(dag_in, (ins I32:$vl)),
                   "$vl,">;
    defm L : VGTbm<opcStr, argStr, opc, RC, !con(dag_in, (ins VLS:$vl)),
                   "$vl,">;
  }
}
multiclass VGTmm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in> {
  defm "" : VGTlm<opcStr, argStr, opc, RC, dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : VGTlm<opcStr, argStr#", $m", opc, RC, !con(dag_in, (ins VM:$m))>;
}
let VE_VLIndex = 4 in
multiclass VGTlhm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                  dag dag_in> {
  defm rr : VGTmm<opcStr, argStr#", $sy, $sz", opc, RC,
                  !con(dag_in, (ins I64:$sy, I64:$sz))>;
  let cy = 0 in
  defm ir : VGTmm<opcStr, argStr#", $sy, $sz", opc, RC,
                  !con(dag_in, (ins simm7:$sy, I64:$sz))>;
  let cz = 0 in
  defm rz : VGTmm<opcStr, argStr#", $sy, $sz", opc, RC,
                  !con(dag_in, (ins I64:$sy, zero:$sz))>;
  let cy = 0, cz = 0 in
  defm iz : VGTmm<opcStr, argStr#", $sy, $sz", opc, RC,
                  !con(dag_in, (ins simm7:$sy, zero:$sz))>;
}
multiclass VGTtgm<string opcStr, bits<8>opc, RegisterClass RC> {
  let vy = ? in defm v : VGTlhm<opcStr, "$vy", opc, RC, (ins V64:$vy)>;
  let cs = 1, sw = ? in defm s : VGTlhm<opcStr, "$sw", opc, RC, (ins I64:$sw)>;
}
multiclass VGTm<string opcStr, bits<8>opc, RegisterClass RC> {
  let vc = 1 in defm "" : VGTtgm<opcStr, opc, RC>;
  let vc = 0 in defm NC : VGTtgm<opcStr#".nc", opc, RC>;
}
// Section 8.9.13 - VGT (Vector Gather)
defm VGT : VGTm<"vgt", 0xa1, V64>;

// Section 8.9.14 - VGTU (Vector Gather Upper)
defm VGTU : VGTm<"vgtu", 0xa2, V64>;

// Section 8.9.15 - VGTL (Vector Gather Lower)
defm VGTLSX : VGTm<"vgtl.sx", 0xa3, V64>;
let cx = 1 in defm VGTLZX : VGTm<"vgtl.zx", 0xa3, V64>;
def : MnemonicAlias<"vgtl", "vgtl.zx">;
def : MnemonicAlias<"vgtl.nc", "vgtl.zx.nc">;
// Multiclass for VSC instructions
let mayStore = 1, hasSideEffects = 0, Uses = [VL] in
multiclass VSCbm<string opcStr, string argStr, bits<8>opc, dag dag_in> {
  def "" : RVM<opc, (outs), dag_in, !strconcat(opcStr, argStr)>;
  let DisableEncoding = "$vl", isCodeGenOnly = 1, VE_VLInUse = 1 in {
    def l : RVM<opc, (outs), !con(dag_in, (ins I32:$vl)),
                !strconcat(opcStr, argStr)>;
    def L : RVM<opc, (outs), !con(dag_in, (ins VLS:$vl)),
                !strconcat(opcStr, argStr)>;
  }
}
multiclass VSCmm<string opcStr, string argStr, bits<8>opc, dag dag_in> {
  defm "" : VSCbm<opcStr, argStr, opc, dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : VSCbm<opcStr, argStr#", $m", opc, !con(dag_in, (ins VM:$m))>;
}
let VE_VLIndex = 4 in
multiclass VSClhm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                  dag dag_in> {
  defm rrv : VSCmm<opcStr, " $vx, "#argStr#", $sy, $sz", opc,
                   !con(dag_in, (ins I64:$sy, I64:$sz, RC:$vx))>;
  let cy = 0 in
  defm irv : VSCmm<opcStr, " $vx, "#argStr#", $sy, $sz", opc,
                   !con(dag_in, (ins simm7:$sy, I64:$sz, RC:$vx))>;
  let cz = 0 in
  defm rzv : VSCmm<opcStr, " $vx, "#argStr#", $sy, $sz", opc,
                   !con(dag_in, (ins I64:$sy, zero:$sz, RC:$vx))>;
  let cy = 0, cz = 0 in
  defm izv : VSCmm<opcStr, " $vx, "#argStr#", $sy, $sz", opc,
                   !con(dag_in, (ins simm7:$sy, zero:$sz, RC:$vx))>;
}
multiclass VSCtgm<string opcStr, bits<8>opc, RegisterClass RC> {
  let vy = ? in defm v : VSClhm<opcStr, "$vy", opc, RC, (ins V64:$vy)>;
  let cs = 1, sw = ? in defm s : VSClhm<opcStr, "$sw", opc, RC, (ins I64:$sw)>;
}
multiclass VSCm<string opcStr, bits<8>opc, RegisterClass RC> {
  let vc = 1, cx = 0 in defm "" : VSCtgm<opcStr, opc, RC>;
  let vc = 0, cx = 0 in defm NC : VSCtgm<opcStr#".nc", opc, RC>;
  let vc = 1, cx = 1 in defm OT : VSCtgm<opcStr#".ot", opc, RC>;
  let vc = 0, cx = 1 in defm NCOT : VSCtgm<opcStr#".nc.ot", opc, RC>;
}
// Section 8.9.16 - VSC (Vector Scatter)
defm VSC : VSCm<"vsc", 0xb1, V64>;

// Section 8.9.17 - VSCU (Vector Scatter Upper)
defm VSCU : VSCm<"vscu", 0xb2, V64>;

// Section 8.9.18 - VSCL (Vector Scatter Lower)
defm VSCL : VSCm<"vscl", 0xb3, V64>;
// Section 8.9.19 - PFCHV (Prefetch Vector)
multiclass PFCHVbm<string opcStr, string argStr, bits<8>opc, dag dag_in> {
  def "" : RVM<opc, (outs), dag_in, !strconcat(opcStr, argStr)>;
  let DisableEncoding = "$vl", isCodeGenOnly = 1, VE_VLInUse = 1 in {
    def l : RVM<opc, (outs), !con(dag_in, (ins I32:$vl)),
                !strconcat(opcStr, argStr)>;
    def L : RVM<opc, (outs), !con(dag_in, (ins VLS:$vl)),
                !strconcat(opcStr, argStr)>;
  }
}
let VE_VLIndex = 2 in
multiclass PFCHVm<string opcStr, bits<8>opc> {
  defm rr : PFCHVbm<opcStr, " $sy, $sz", opc, (ins I64:$sy, I64:$sz)>;
  let cy = 0 in
  defm ir : PFCHVbm<opcStr, " $sy, $sz", opc, (ins simm7:$sy, I64:$sz)>;
  let cz = 0 in
  defm rz : PFCHVbm<opcStr, " $sy, $sz", opc, (ins I64:$sy, zero:$sz)>;
  let cy = 0, cz = 0 in
  defm iz : PFCHVbm<opcStr, " $sy, $sz", opc, (ins simm7:$sy, zero:$sz)>;
}
let vc = 1, vx = 0 in defm PFCHV : PFCHVm<"pfchv", 0x80>;
let vc = 0, vx = 0 in defm PFCHVNC : PFCHVm<"pfchv.nc", 0x80>;
// Section 8.9.20 - LSV (Load S to V)
let sx = 0, vx = ?, hasSideEffects = 0 in
multiclass LSVbm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in> {
  def "" : RR<opc, (outs RC:$vx), dag_in, !strconcat(opcStr, " ${vx}", argStr)>;
  let Constraints = "$vx = $base", DisableEncoding = "$base",
      isCodeGenOnly = 1 in
  def _v : RR<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
              !strconcat(opcStr, " ${vx}", argStr)>;
}
multiclass LSVm<string opcStr, bits<8>opc, RegisterClass RC> {
  defm rr : LSVbm<opcStr, "(${sy}), $sz", opc, RC, (ins I64:$sy, I64:$sz)>;
  let cy = 0 in
  defm ir : LSVbm<opcStr, "(${sy}), $sz", opc, RC, (ins uimm7:$sy, I64:$sz)>;
  let cz = 0 in
  defm rm : LSVbm<opcStr, "(${sy}), $sz", opc, RC, (ins I64:$sy, mimm:$sz)>;
  let cy = 0, cz = 0 in
  defm im : LSVbm<opcStr, "(${sy}), $sz", opc, RC, (ins uimm7:$sy, mimm:$sz)>;
}
defm LSV : LSVm<"lsv", 0x8e, V64>;
// Section 8.9.21 - LVS (Load V to S)
let cz = 0, sz = 0, vx = ?, hasSideEffects = 0 in
multiclass LVSm<string opcStr, bits<8>opc, RegisterClass RC> {
  def vr : RR<opc, (outs I64:$sx), (ins RC:$vx, I64:$sy),
              opcStr#" $sx, ${vx}(${sy})">;
  let cy = 0 in
  def vi : RR<opc, (outs I64:$sx), (ins RC:$vx, uimm7:$sy),
              opcStr#" $sx, ${vx}(${sy})">;
}
defm LVS : LVSm<"lvs", 0x9e, V64>;
// Section 8.9.22 - LVM (Load VM)
let sx = 0, vx = ?, hasSideEffects = 0 in
multiclass LVMbm<string opcStr, string argStr, bits<8>opc, RegisterClass RCM,
                 dag dag_in> {
  def "" : RR<opc, (outs RCM:$vx), dag_in,
              !strconcat(opcStr, " $vx, ", argStr)>;
  let Constraints = "$vx = $base", DisableEncoding = "$base",
      isCodeGenOnly = 1 in {
    def _m : RR<opc, (outs RCM:$vx), !con(dag_in, (ins RCM:$base)),
                !strconcat(opcStr, " $vx, ", argStr)>;
  }
}
multiclass LVMom<string opcStr, bits<8>opc, RegisterClass RCM> {
  defm rr : LVMbm<opcStr, "$sy, $sz", opc, RCM, (ins I64:$sy, I64:$sz)>;
  let cy = 0 in
  defm ir : LVMbm<opcStr, "$sy, $sz", opc, RCM, (ins uimm2:$sy, I64:$sz)>;
  let cz = 0 in
  defm rm : LVMbm<opcStr, "$sy, $sz", opc, RCM, (ins I64:$sy, mimm:$sz)>;
  let cy = 0, cz = 0 in
  defm im : LVMbm<opcStr, "$sy, $sz", opc, RCM, (ins uimm2:$sy, mimm:$sz)>;
}
multiclass LVMm<string opcStr, bits<8>opc, RegisterClass RCM> {
  defm "" : LVMom<opcStr, opc, RCM>;
}
defm LVM : LVMm<"lvm", 0xb7, VM>;
// Section 8.9.23 - SVM (Save VM)
let cz = 0, sz = 0, vz = ?, hasSideEffects = 0 in
multiclass SVMm<string opcStr, bits<8>opc, RegisterClass RCM> {
  def mr : RR<opc, (outs I64:$sx), (ins RCM:$vz, I64:$sy),
              opcStr#" $sx, $vz, $sy">;
  let cy = 0 in
  def mi : RR<opc, (outs I64:$sx), (ins RCM:$vz, uimm2:$sy),
              opcStr#" $sx, $vz, $sy">;
}
defm SVM : SVMm<"svm", 0xa7, VM>;
// Section 8.9.24 - VBRD (Vector Broadcast)
let vx = ?, hasSideEffects = 0, Uses = [VL] in
multiclass VBRDbm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                  dag dag_in, string disEnc = ""> {
  let DisableEncoding = disEnc in
  def "" : RV<opc, (outs RC:$vx), dag_in,
              !strconcat(opcStr, " $vx, ", argStr)>;
  let Constraints = "$vx = $base", DisableEncoding = disEnc#"$base",
      isCodeGenOnly = 1 in
  def _v : RV<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
              !strconcat(opcStr, " $vx, ", argStr)>;
}
multiclass VBRDlm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                  dag dag_in> {
  defm "" : VBRDbm<opcStr, argStr, opc, RC, dag_in>;
  let isCodeGenOnly = 1, VE_VLInUse = 1 in {
    defm l : VBRDbm<opcStr, argStr, opc, RC, !con(dag_in, (ins I32:$vl)),
                    "$vl,">;
    defm L : VBRDbm<opcStr, argStr, opc, RC, !con(dag_in, (ins VLS:$vl)),
                    "$vl,">;
  }
}
multiclass VBRDmm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                  RegisterClass RCM, dag dag_in> {
  defm "" : VBRDlm<opcStr, argStr, opc, RC, dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : VBRDlm<opcStr, argStr#", $m", opc, RC, !con(dag_in, (ins RCM:$m))>;
}
let VE_VLIndex = 2 in
multiclass VBRDm<string opcStr, bits<8>opc, RegisterClass VRC, RegisterClass RC,
                 RegisterClass RCM> {
  defm r : VBRDmm<opcStr, "$sy", opc, VRC, RCM, (ins RC:$sy)>;
  let cy = 0 in
  defm i : VBRDmm<opcStr, "$sy", opc, VRC, RCM, (ins simm7:$sy)>;
}
let cx = 0, cx2 = 0 in
defm VBRD : VBRDm<"vbrd", 0x8c, V64, I64, VM>;
let cx = 0, cx2 = 1 in
defm VBRDL : VBRDm<"vbrdl", 0x8c, V64, I32, VM>;
let cx = 1, cx2 = 0 in
defm VBRDU : VBRDm<"vbrdu", 0x8c, V64, F32, VM>;
let cx = 1, cx2 = 1 in
defm PVBRD : VBRDm<"pvbrd", 0x8c, V64, I64, VM512>;
// Section 8.9.25 - VMV (Vector Move)
let vx = ?, vz = ?, hasSideEffects = 0, Uses = [VL] in
multiclass VMVbm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in, string disEnc = ""> {
  let DisableEncoding = disEnc in
  def "" : RV<opc, (outs RC:$vx), dag_in,
              !strconcat(opcStr, " $vx, ", argStr)>;
  let Constraints = "$vx = $base", DisableEncoding = disEnc#"$base",
      isCodeGenOnly = 1 in
  def _v : RV<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
              !strconcat(opcStr, " $vx, ", argStr)>;
}
multiclass VMVlm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in> {
  defm "" : VMVbm<opcStr, argStr, opc, RC, dag_in>;
  let isCodeGenOnly = 1, VE_VLInUse = 1 in {
    defm l : VMVbm<opcStr, argStr, opc, RC, !con(dag_in, (ins I32:$vl)),
                   "$vl,">;
    defm L : VMVbm<opcStr, argStr, opc, RC, !con(dag_in, (ins VLS:$vl)),
                   "$vl,">;
  }
}
multiclass VMVmm<string opcStr, bits<8>opc, RegisterClass RC,
                 RegisterClass RCM, dag dag_in> {
  defm "" : VMVlm<opcStr, "$sy, $vz", opc, RC, dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : VMVlm<opcStr, "$sy, $vz, $m", opc, RC, !con(dag_in, (ins RCM:$m))>;
}
let VE_VLIndex = 3 in
multiclass VMVm<string opcStr, bits<8>opc, RegisterClass RC,
                RegisterClass RCM> {
  defm rv : VMVmm<opcStr, opc, RC, RCM, (ins I64:$sy, RC:$vz)>;
  let cy = 0 in
  defm iv : VMVmm<opcStr, opc, RC, RCM, (ins uimm7:$sy, RC:$vz)>;
}
defm VMV : VMVm<"vmv", 0x9c, V64, VM>;
//-----------------------------------------------------------------------------
// Section 8.10 - Vector Fixed-Point Arithmetic Instructions
//-----------------------------------------------------------------------------

// Multiclass for generic vector calculation
let vx = ?, hasSideEffects = 0, Uses = [VL] in
multiclass RVbm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                dag dag_in, string disEnc = ""> {
  let DisableEncoding = disEnc in
  def "" : RV<opc, (outs RC:$vx), dag_in,
              !strconcat(opcStr, " $vx", argStr)>;
  let Constraints = "$vx = $base", DisableEncoding = disEnc#"$base",
      isCodeGenOnly = 1 in
  def _v : RV<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
              !strconcat(opcStr, " $vx", argStr)>;
}
multiclass RVlm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                dag dag_in> {
  defm "" : RVbm<opcStr, argStr, opc, RC, dag_in>;
  let isCodeGenOnly = 1, VE_VLInUse = 1 in {
    defm l : RVbm<opcStr, argStr, opc, RC, !con(dag_in, (ins I32:$vl)),
                  "$vl,">;
    defm L : RVbm<opcStr, argStr, opc, RC, !con(dag_in, (ins VLS:$vl)),
                  "$vl,">;
  }
}
multiclass RVmm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                RegisterClass RCM, dag dag_in> {
  defm "" : RVlm<opcStr, argStr, opc, RC, dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : RVlm<opcStr, argStr#", $m", opc, RC, !con(dag_in, (ins RCM:$m))>;
}
// Generic RV multiclass with 2 arguments.
// e.g. VADD, VSUB, VMPY, etc.
let VE_VLIndex = 3 in
multiclass RVm<string opcStr, bits<8>opc, RegisterClass VRC, RegisterClass RC,
               RegisterClass RCM, Operand SIMM = simm7> {
  let cy = 0, sy = 0, vy = ?, vz = ? in
  defm vv : RVmm<opcStr, ", $vy, $vz", opc, VRC, RCM, (ins VRC:$vy, VRC:$vz)>;
  let cs = 1, vz = ? in
  defm rv : RVmm<opcStr, ", $sy, $vz", opc, VRC, RCM, (ins RC:$sy, VRC:$vz)>;
  let cs = 1, cy = 0, vz = ? in
  defm iv : RVmm<opcStr, ", $sy, $vz", opc, VRC, RCM, (ins SIMM:$sy, VRC:$vz)>;
}
// Special RV multiclass with 2 arguments using cs2.
// e.g. VDIV, VDVS, and VDVX.
let VE_VLIndex = 3 in
multiclass RVDIVm<string opcStr, bits<8>opc, RegisterClass VRC,
                  RegisterClass RC, RegisterClass RCM, Operand SIMM = simm7> {
  let cy = 0, sy = 0, vy = ?, vz = ? in
  defm vv : RVmm<opcStr, ", $vy, $vz", opc, VRC, RCM, (ins VRC:$vy, VRC:$vz)>;
  let cs2 = 1, vy = ? in
  defm vr : RVmm<opcStr, ", $vy, $sy", opc, VRC, RCM, (ins VRC:$vy, RC:$sy)>;
  let cs2 = 1, cy = 0, vy = ? in
  defm vi : RVmm<opcStr, ", $vy, $sy", opc, VRC, RCM, (ins VRC:$vy, SIMM:$sy)>;
  let cs = 1, vz = ? in
  defm rv : RVmm<opcStr, ", $sy, $vz", opc, VRC, RCM, (ins RC:$sy, VRC:$vz)>;
  let cs = 1, cy = 0, vz = ? in
  defm iv : RVmm<opcStr, ", $sy, $vz", opc, VRC, RCM, (ins SIMM:$sy, VRC:$vz)>;
}
// Generic RV multiclass with 2 arguments for logical operations.
// e.g. VAND, VOR, VXOR, etc.
let VE_VLIndex = 3 in
multiclass RVLm<string opcStr, bits<8>opc, RegisterClass ScaRC,
                RegisterClass RC, RegisterClass RCM> {
  let cy = 0, sy = 0, vy = ?, vz = ? in
  defm vv : RVmm<opcStr, ", $vy, $vz", opc, RC, RCM, (ins RC:$vy, RC:$vz)>;
  let cs = 1, vz = ? in
  defm rv : RVmm<opcStr, ", $sy, $vz", opc, RC, RCM, (ins ScaRC:$sy, RC:$vz)>;
  let cs = 1, cy = 0, vz = ? in
  defm mv : RVmm<opcStr, ", $sy, $vz", opc, RC, RCM, (ins mimm:$sy, RC:$vz)>;
}
// Generic RV multiclass with 1 argument.
// e.g. VLDZ, VPCNT, and VBRV.
let VE_VLIndex = 2 in
multiclass RV1m<string opcStr, bits<8>opc, RegisterClass RC,
                RegisterClass RCM> {
  let cy = 0, sy = 0, vz = ? in
  defm v : RVmm<opcStr, ", $vz", opc, RC, RCM, (ins RC:$vz)>;
}
// Generic RV multiclass with no argument.
let VE_VLIndex = 1 in
multiclass RV0m<string opcStr, bits<8>opc, RegisterClass RC,
                RegisterClass RCM> {
  let cy = 0, sy = 0 in
  defm "" : RVmm<opcStr, "", opc, RC, RCM, (ins)>;
}
// Generic RV multiclass with 2 arguments for shift operations.
// e.g. VSLL, VSRL, VSLA, etc.
let VE_VLIndex = 3 in
multiclass RVSm<string opcStr, bits<8>opc, RegisterClass ScaRC,
                RegisterClass RC, RegisterClass RCM> {
  let cy = 0, sy = 0, vy = ?, vz = ? in
  defm vv : RVmm<opcStr, ", $vz, $vy", opc, RC, RCM, (ins RC:$vz, RC:$vy)>;
  let cs = 1, vz = ? in
  defm vr : RVmm<opcStr, ", $vz, $sy", opc, RC, RCM, (ins RC:$vz, ScaRC:$sy)>;
  let cs = 1, cy = 0, vz = ? in
  defm vi : RVmm<opcStr, ", $vz, $sy", opc, RC, RCM, (ins RC:$vz, uimm7:$sy)>;
}
// Generic RV multiclass with 3 arguments for shift operations.
// e.g. VSLD and VSRD.
let VE_VLIndex = 4 in
multiclass RVSDm<string opcStr, bits<8>opc, RegisterClass RC,
                 RegisterClass RCM> {
  let vy = ?, vz = ? in
  defm vvr : RVmm<opcStr, ", ($vy, ${vz}), $sy", opc, RC, RCM,
                  (ins RC:$vy, RC:$vz, I64:$sy)>;
  let cy = 0, vy = ?, vz = ? in
  defm vvi : RVmm<opcStr, ", ($vy, ${vz}), $sy", opc, RC, RCM,
                  (ins RC:$vy, RC:$vz, uimm7:$sy)>;
}
// Special RV multiclass with 3 arguments.
let VE_VLIndex = 4 in
multiclass RVSAm<string opcStr, bits<8>opc, RegisterClass RC,
                 RegisterClass RCM> {
  let cz = 1, sz = ?, vz = ? in
  defm vrr : RVmm<opcStr, ", $vz, $sy, $sz", opc, RC, RCM,
                  (ins RC:$vz, I64:$sy, I64:$sz)>;
  let cz = 0, sz = ?, vz = ? in
  defm vrm : RVmm<opcStr, ", $vz, $sy, $sz", opc, RC, RCM,
                  (ins RC:$vz, I64:$sy, mimm:$sz)>;
  let cy = 0, cz = 1, sz = ?, vz = ? in
  defm vir : RVmm<opcStr, ", $vz, $sy, $sz", opc, RC, RCM,
                  (ins RC:$vz, uimm3:$sy, I64:$sz)>;
  let cy = 0, cz = 0, sz = ?, vz = ? in
  defm vim : RVmm<opcStr, ", $vz, $sy, $sz", opc, RC, RCM,
                  (ins RC:$vz, uimm3:$sy, mimm:$sz)>;
}
// Generic RV multiclass with 1 argument using the vy field.
// e.g. VFSQRT, VRCP, and VRSQRT.
let VE_VLIndex = 2 in
multiclass RVF1m<string opcStr, bits<8>opc, RegisterClass RC,
                 RegisterClass RCM> {
  let cy = 0, sy = 0, vy = ? in
  defm v : RVmm<opcStr, ", $vy", opc, RC, RCM, (ins RC:$vy)>;
}
// Special RV multiclass with 3 arguments using cs2.
// e.g. VFMAD, VFMSB, VFNMAD, etc.
let VE_VLIndex = 4 in
multiclass RVMm<string opcStr, bits<8>opc, RegisterClass VRC, RegisterClass RC,
                RegisterClass RCM, Operand SIMM = simm7> {
  let cy = 0, sy = 0, vy = ?, vz = ?, vw = ? in
  defm vvv : RVmm<opcStr, ", $vy, $vz, $vw", opc, VRC, RCM,
                  (ins VRC:$vy, VRC:$vz, VRC:$vw)>;
  let cs2 = 1, vy = ?, vw = ? in
  defm vrv : RVmm<opcStr, ", $vy, $sy, $vw", opc, VRC, RCM,
                  (ins VRC:$vy, RC:$sy, VRC:$vw)>;
  let cs2 = 1, cy = 0, vy = ?, vw = ? in
  defm viv : RVmm<opcStr, ", $vy, $sy, $vw", opc, VRC, RCM,
                  (ins VRC:$vy, SIMM:$sy, VRC:$vw)>;
  let cs = 1, vz = ?, vw = ? in
  defm rvv : RVmm<opcStr, ", $sy, $vz, $vw", opc, VRC, RCM,
                  (ins RC:$sy, VRC:$vz, VRC:$vw)>;
  let cs = 1, cy = 0, vz = ?, vw = ? in
  defm ivv : RVmm<opcStr, ", $sy, $vz, $vw", opc, VRC, RCM,
                  (ins SIMM:$sy, VRC:$vz, VRC:$vw)>;
}
// Special RV multiclass with 2 arguments for floating-point conversions.
// e.g. VFIX and VFIXX.
let hasSideEffects = 0, VE_VLIndex = 3 in
multiclass RVFIXm<string opcStr, bits<8> opc, RegisterClass RC,
                  RegisterClass RCM> {
  let cy = 0, sy = 0, vy = ?, vz = ? in
  defm v : RVmm<opcStr#"$vz", ", $vy", opc, RC, RCM, (ins RDOp:$vz, RC:$vy)>;
}
// Multiclass for generic iterative vector calculation
let vx = ?, hasSideEffects = 0, Uses = [VL] in
multiclass RVIbm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in, string disEnc = ""> {
  let DisableEncoding = disEnc in
  def "" : RV<opc, (outs RC:$vx), dag_in,
              !strconcat(opcStr, " $vx", argStr)>;
  let isCodeGenOnly = 1, Constraints = "$vx = $base",
      DisableEncoding = disEnc#"$base" in
  def _v : RV<opc, (outs RC:$vx), !con(dag_in, (ins RC:$base)),
              !strconcat(opcStr, " $vx", argStr)>;
}
multiclass RVIlm<string opcStr, string argStr, bits<8>opc, RegisterClass RC,
                 dag dag_in> {
  defm "" : RVIbm<opcStr, argStr, opc, RC, dag_in>;
  let isCodeGenOnly = 1, VE_VLInUse = 1 in {
    defm l : RVIbm<opcStr, argStr, opc, RC, !con(dag_in, (ins I32:$vl)),
                   "$vl,">;
    defm L : RVIbm<opcStr, argStr, opc, RC, !con(dag_in, (ins VLS:$vl)),
                   "$vl,">;
  }
}
// Generic RV multiclass for an iterative operation with 2 arguments.
// e.g. VFIA, VFIS, and VFIM
let VE_VLIndex = 3 in
multiclass RVI2m<string opcStr, bits<8>opc, RegisterClass VRC,
                 RegisterClass RC> {
  let vy = ? in
  defm vr : RVIlm<opcStr, ", $vy, $sy", opc, VRC, (ins VRC:$vy, RC:$sy)>;
  let cy = 0, vy = ? in
  defm vi : RVIlm<opcStr, ", $vy, $sy", opc, VRC, (ins VRC:$vy, simm7fp:$sy)>;
}
// Generic RV multiclass for an iterative operation with 3 arguments.
// e.g. VFIAM, VFISM, VFIMA, etc.
let VE_VLIndex = 4 in
multiclass RVI3m<string opcStr, bits<8>opc, RegisterClass VRC,
                 RegisterClass RC> {
  let vy = ?, vz = ? in
  defm vvr : RVIlm<opcStr, ", $vy, $vz, $sy", opc, VRC,
                   (ins VRC:$vy, VRC:$vz, RC:$sy)>;
  let cy = 0, vy = ?, vz = ? in
  defm vvi : RVIlm<opcStr, ", $vy, $vz, $sy", opc, VRC,
                   (ins VRC:$vy, VRC:$vz, simm7fp:$sy)>;
}
// Special RV multiclass with 3 arguments for VSHF.
let vy = ?, vz = ?, VE_VLIndex = 4 in
multiclass RVSHFm<string opcStr, bits<8>opc, RegisterClass RC,
                  Operand SIMM = uimm4> {
  defm vvr : RVlm<opcStr, ", $vy, $vz, $sy", opc, RC,
                  (ins RC:$vy, RC:$vz, I64:$sy)>;
  let cy = 0 in defm vvi : RVlm<opcStr, ", $vy, $vz, $sy", opc, RC,
                                (ins RC:$vy, RC:$vz, SIMM:$sy)>;
}
// Multiclass for generic mask calculation
let vx = ?, hasSideEffects = 0, Uses = [VL] in
multiclass RVMKbm<string opcStr, string argStr, bits<8>opc, dag dag_out,
                  dag dag_in> {
  def "" : RV<opc, dag_out, dag_in, !strconcat(opcStr, argStr)>;
  let DisableEncoding = "$vl", isCodeGenOnly = 1, VE_VLInUse = 1 in {
    def l : RV<opc, dag_out, !con(dag_in, (ins I32:$vl)),
               !strconcat(opcStr, argStr)>;
    def L : RV<opc, dag_out, !con(dag_in, (ins VLS:$vl)),
               !strconcat(opcStr, argStr)>;
  }
}
multiclass RVMKlm<string opcStr, string argStr, bits<8>opc, RegisterClass RCM,
                  dag dag_in> {
  defm "" : RVMKbm<opcStr, " $vx"#argStr, opc, (outs RCM:$vx), dag_in>;
  let m = ?, VE_VLWithMask = 1 in
  defm m : RVMKbm<opcStr, " $vx"#argStr#", $m", opc, (outs RCM:$vx),
                  !con(dag_in, (ins RCM:$m))>;
}
// Generic RV multiclass for mask calculation with a condition.
// e.g. VFMK, VFMS, and VFMF
let cy = 0, sy = 0 in
multiclass RVMKom<string opcStr, bits<8> opc, RegisterClass RC,
                  RegisterClass RCM> {
  let vy = ?, vz = ?, VE_VLIndex = 3 in
  defm v : RVMKlm<opcStr#"$vy", ", $vz", opc, RCM, (ins CCOp:$vy, RC:$vz)>;
  let vy = 15 /* AT */, VE_VLIndex = 1 in
  defm a : RVMKlm<opcStr#"at", "", opc, RCM, (ins)>;
  let vy = 0 /* AF */, VE_VLIndex = 1 in
  defm na : RVMKlm<opcStr#"af", "", opc, RCM, (ins)>;
}
multiclass RVMKm<string opcStr, bits<8> opc, RegisterClass RC,
                 RegisterClass RCM> {
  defm "" : RVMKom<opcStr, opc, RC, RCM>;
}
// Generic RV multiclass for mask calculation with 2 arguments.
// e.g. ANDM, ORM, XORM, etc.
let cy = 0, sy = 0, vx = ?, vy = ?, vz = ?, hasSideEffects = 0 in
multiclass RVM2m<string opcStr, bits<8> opc, RegisterClass RCM> {
  def mm : RV<opc, (outs RCM:$vx), (ins RCM:$vy, RCM:$vz),
              !strconcat(opcStr, " $vx, $vy, $vz")>;
}
// Generic RV multiclass for mask calculation with 1 argument.
// e.g. NEGM
let cy = 0, sy = 0, vx = ?, vy = ?, hasSideEffects = 0 in
multiclass RVM1m<string opcStr, bits<8> opc, RegisterClass RCM> {
  def m : RV<opc, (outs RCM:$vx), (ins RCM:$vy),
             !strconcat(opcStr, " $vx, $vy")>;
}
// Generic RV multiclass for mask calculation with 1 argument.
// e.g. PCVM, LZVM, and TOVM
let cy = 0, sy = 0, vy = ?, hasSideEffects = 0, Uses = [VL] in
multiclass RVMSbm<string opcStr, string argStr, bits<8>opc, dag dag_in> {
  def "" : RV<opc, (outs I64:$sx), dag_in,
              !strconcat(opcStr, " $sx,", argStr)> {
    let Inst{54-48} = sx;
  }
  let DisableEncoding = "$vl", isCodeGenOnly = 1, VE_VLInUse = 1 in {
    def l : RV<opc, (outs I64:$sx), !con(dag_in, (ins I32:$vl)),
               !strconcat(opcStr, " $sx,", argStr)> {
      let Inst{54-48} = sx;
    }
    def L : RV<opc, (outs I64:$sx), !con(dag_in, (ins VLS:$vl)),
               !strconcat(opcStr, " $sx,", argStr)> {
      let Inst{54-48} = sx;
    }
  }
}
let VE_VLIndex = 2 in
multiclass RVMSm<string opcStr, bits<8> opc, RegisterClass RCM> {
  defm m : RVMSbm<opcStr, " $vy", opc, (ins RCM:$vy)>;
}
// Section 8.10.1 - VADD (Vector Add)
let cx = 0, cx2 = 0 in
defm VADDUL : RVm<"vaddu.l", 0xc8, V64, I64, VM>;
let cx = 0, cx2 = 1 in {
  defm PVADDULO : RVm<"pvaddu.lo", 0xc8, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VADDUW : RVm<"vaddu.w", 0xc8, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVADDUUP : RVm<"pvaddu.up", 0xc8, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVADDU : RVm<"pvaddu", 0xc8, V64, I64, VM512>;
def : MnemonicAlias<"vaddu.w", "pvaddu.lo">;

// Section 8.10.2 - VADS (Vector Add Single)
let cx = 0, cx2 = 0 in
defm VADDSWSX : RVm<"vadds.w.sx", 0xca, V64, I32, VM>;
let cx = 0, cx2 = 1 in {
  defm PVADDSLO : RVm<"pvadds.lo", 0xca, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VADDSWZX : RVm<"vadds.w.zx", 0xca, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVADDSUP : RVm<"pvadds.up", 0xca, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVADDS : RVm<"pvadds", 0xca, V64, I64, VM512>;
def : MnemonicAlias<"pvadds.lo.sx", "vadds.w.sx">;
def : MnemonicAlias<"vadds.w.zx", "pvadds.lo">;
def : MnemonicAlias<"vadds.w", "pvadds.lo">;
def : MnemonicAlias<"pvadds.lo.zx", "pvadds.lo">;

// Section 8.10.3 - VADX (Vector Add)
defm VADDSL : RVm<"vadds.l", 0x8b, V64, I64, VM>;
// Section 8.10.4 - VSUB (Vector Subtract)
let cx = 0, cx2 = 0 in
defm VSUBUL : RVm<"vsubu.l", 0xd8, V64, I64, VM>;
let cx = 0, cx2 = 1 in {
  defm PVSUBULO : RVm<"pvsubu.lo", 0xd8, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VSUBUW : RVm<"vsubu.w", 0xd8, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVSUBUUP : RVm<"pvsubu.up", 0xd8, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVSUBU : RVm<"pvsubu", 0xd8, V64, I64, VM512>;
def : MnemonicAlias<"vsubu.w", "pvsubu.lo">;

// Section 8.10.5 - VSBS (Vector Subtract Single)
let cx = 0, cx2 = 0 in
defm VSUBSWSX : RVm<"vsubs.w.sx", 0xda, V64, I32, VM>;
let cx = 0, cx2 = 1 in {
  defm PVSUBSLO : RVm<"pvsubs.lo", 0xda, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VSUBSWZX : RVm<"vsubs.w.zx", 0xda, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVSUBSUP : RVm<"pvsubs.up", 0xda, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVSUBS : RVm<"pvsubs", 0xda, V64, I64, VM512>;
def : MnemonicAlias<"pvsubs.lo.sx", "vsubs.w.sx">;
def : MnemonicAlias<"vsubs.w.zx", "pvsubs.lo">;
def : MnemonicAlias<"vsubs.w", "pvsubs.lo">;
def : MnemonicAlias<"pvsubs.lo.zx", "pvsubs.lo">;

// Section 8.10.6 - VSBX (Vector Subtract)
defm VSUBSL : RVm<"vsubs.l", 0x9b, V64, I64, VM>;
// Section 8.10.7 - VMPY (Vector Multiply)
defm VMULUL : RVm<"vmulu.l", 0xc9, V64, I64, VM>;
defm VMULUW : RVm<"vmulu.w", 0xc9, V64, I32, VM>;

// Section 8.10.8 - VMPS (Vector Multiply Single)
defm VMULSWSX : RVm<"vmuls.w.sx", 0xcb, V64, I32, VM>;
defm VMULSWZX : RVm<"vmuls.w.zx", 0xcb, V64, I32, VM>;
def : MnemonicAlias<"vmuls.w", "vmuls.w.zx">;

// Section 8.10.9 - VMPX (Vector Multiply)
defm VMULSL : RVm<"vmuls.l", 0xdb, V64, I64, VM>;

// Section 8.10.10 - VMPD (Vector Multiply)
defm VMULSLW : RVm<"vmuls.l.w", 0xd9, V64, I32, VM>;

// Section 8.10.11 - VDIV (Vector Divide)
defm VDIVUL : RVDIVm<"vdivu.l", 0xe9, V64, I64, VM>;
defm VDIVUW : RVDIVm<"vdivu.w", 0xe9, V64, I32, VM>;

// Section 8.10.12 - VDVS (Vector Divide Single)
defm VDIVSWSX : RVDIVm<"vdivs.w.sx", 0xeb, V64, I32, VM>;
defm VDIVSWZX : RVDIVm<"vdivs.w.zx", 0xeb, V64, I32, VM>;
def : MnemonicAlias<"vdivs.w", "vdivs.w.zx">;

// Section 8.10.13 - VDVX (Vector Divide)
defm VDIVSL : RVDIVm<"vdivs.l", 0xfb, V64, I64, VM>;
// Section 8.10.14 - VCMP (Vector Compare)
let cx = 0, cx2 = 0 in
defm VCMPUL : RVm<"vcmpu.l", 0xb9, V64, I64, VM>;
let cx = 0, cx2 = 1 in {
  defm PVCMPULO : RVm<"pvcmpu.lo", 0xb9, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VCMPUW : RVm<"vcmpu.w", 0xb9, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVCMPUUP : RVm<"pvcmpu.up", 0xb9, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVCMPU : RVm<"pvcmpu", 0xb9, V64, I64, VM512>;
def : MnemonicAlias<"vcmpu.w", "pvcmpu.lo">;

// Section 8.10.15 - VCPS (Vector Compare Single)
let cx = 0, cx2 = 0 in
defm VCMPSWSX : RVm<"vcmps.w.sx", 0xfa, V64, I32, VM>;
let cx = 0, cx2 = 1 in {
  defm PVCMPSLO : RVm<"pvcmps.lo", 0xfa, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VCMPSWZX : RVm<"vcmps.w.zx", 0xfa, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVCMPSUP : RVm<"pvcmps.up", 0xfa, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVCMPS : RVm<"pvcmps", 0xfa, V64, I64, VM512>;
def : MnemonicAlias<"pvcmps.lo.sx", "vcmps.w.sx">;
def : MnemonicAlias<"vcmps.w.zx", "pvcmps.lo">;
def : MnemonicAlias<"vcmps.w", "pvcmps.lo">;
def : MnemonicAlias<"pvcmps.lo.zx", "pvcmps.lo">;

// Section 8.10.16 - VCPX (Vector Compare)
defm VCMPSL : RVm<"vcmps.l", 0xba, V64, I64, VM>;
// Section 8.10.17 - VCMS (Vector Compare and Select Maximum/Minimum Single)
let cx = 0, cx2 = 0 in
defm VMAXSWSX : RVm<"vmaxs.w.sx", 0x8a, V64, I32, VM>;
let cx = 0, cx2 = 1 in {
  defm PVMAXSLO : RVm<"pvmaxs.lo", 0x8a, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VMAXSWZX : RVm<"vmaxs.w.zx", 0x8a, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVMAXSUP : RVm<"pvmaxs.up", 0x8a, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVMAXS : RVm<"pvmaxs", 0x8a, V64, I64, VM512>;

let cx = 0, cx2 = 0 in
defm VMINSWSX : RVm<"vmins.w.sx", 0x8a, V64, I32, VM>;
let cx = 0, cx2 = 1 in {
  defm PVMINSLO : RVm<"pvmins.lo", 0x8a, V64, I32, VM>;
  let isCodeGenOnly = 1 in
  defm VMINSWZX : RVm<"vmins.w.zx", 0x8a, V64, I32, VM>;
}
let cx = 1, cx2 = 0 in
defm PVMINSUP : RVm<"pvmins.up", 0x8a, V64, I64, VM>;
let cx = 1, cx2 = 1 in
defm PVMINS : RVm<"pvmins", 0x8a, V64, I64, VM512>;

def : MnemonicAlias<"pvmaxs.lo.sx", "vmaxs.w.sx">;
def : MnemonicAlias<"vmaxs.w.zx", "pvmaxs.lo">;
def : MnemonicAlias<"vmaxs.w", "pvmaxs.lo">;
def : MnemonicAlias<"pvmaxs.lo.zx", "pvmaxs.lo">;
def : MnemonicAlias<"pvmins.lo.sx", "vmins.w.sx">;
def : MnemonicAlias<"vmins.w.zx", "pvmins.lo">;
def : MnemonicAlias<"vmins.w", "pvmins.lo">;
def : MnemonicAlias<"pvmins.lo.zx", "pvmins.lo">;

// Section 8.10.18 - VCMX (Vector Compare and Select Maximum/Minimum)
defm VMAXSL : RVm<"vmaxs.l", 0x9a, V64, I64, VM>;
defm VMINSL : RVm<"vmins.l", 0x9a, V64, I64, VM>;
//-----------------------------------------------------------------------------
// Section 8.11 - Vector Logical Operation Instructions
//-----------------------------------------------------------------------------

// Section 8.11.1 - VAND (Vector And)
let cx = 0, cx2 = 0 in defm VAND : RVLm<"vand", 0xc4, I64, V64, VM>;
let cx = 0, cx2 = 1 in defm PVANDLO : RVLm<"pvand.lo", 0xc4, I32, V64, VM>;
let cx = 1, cx2 = 0 in defm PVANDUP : RVLm<"pvand.up", 0xc4, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVAND : RVLm<"pvand", 0xc4, I64, V64, VM512>;

// Section 8.11.2 - VOR (Vector Or)
let cx = 0, cx2 = 0 in defm VOR : RVLm<"vor", 0xc5, I64, V64, VM>;
let cx = 0, cx2 = 1 in defm PVORLO : RVLm<"pvor.lo", 0xc5, I32, V64, VM>;
let cx = 1, cx2 = 0 in defm PVORUP : RVLm<"pvor.up", 0xc5, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVOR : RVLm<"pvor", 0xc5, I64, V64, VM512>;

// Section 8.11.3 - VXOR (Vector Exclusive Or)
let cx = 0, cx2 = 0 in defm VXOR : RVLm<"vxor", 0xc6, I64, V64, VM>;
let cx = 0, cx2 = 1 in defm PVXORLO : RVLm<"pvxor.lo", 0xc6, I32, V64, VM>;
let cx = 1, cx2 = 0 in defm PVXORUP : RVLm<"pvxor.up", 0xc6, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVXOR : RVLm<"pvxor", 0xc6, I64, V64, VM512>;

// Section 8.11.4 - VEQV (Vector Equivalence)
let cx = 0, cx2 = 0 in defm VEQV : RVLm<"veqv", 0xc7, I64, V64, VM>;
let cx = 0, cx2 = 1 in defm PVEQVLO : RVLm<"pveqv.lo", 0xc7, I32, V64, VM>;
let cx = 1, cx2 = 0 in defm PVEQVUP : RVLm<"pveqv.up", 0xc7, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVEQV : RVLm<"pveqv", 0xc7, I64, V64, VM512>;

// Section 8.11.5 - VLDZ (Vector Leading Zero Count)
let cx = 0, cx2 = 0 in defm VLDZ : RV1m<"vldz", 0xe7, V64, VM>;
let cx = 0, cx2 = 1 in defm PVLDZLO : RV1m<"pvldz.lo", 0xe7, V64, VM>;
let cx = 1, cx2 = 0 in defm PVLDZUP : RV1m<"pvldz.up", 0xe7, V64, VM>;
let cx = 1, cx2 = 1 in defm PVLDZ : RV1m<"pvldz", 0xe7, V64, VM512>;

// Section 8.11.6 - VPCNT (Vector Population Count)
let cx = 0, cx2 = 0 in defm VPCNT : RV1m<"vpcnt", 0xac, V64, VM>;
let cx = 0, cx2 = 1 in defm PVPCNTLO : RV1m<"pvpcnt.lo", 0xac, V64, VM>;
let cx = 1, cx2 = 0 in defm PVPCNTUP : RV1m<"pvpcnt.up", 0xac, V64, VM>;
let cx = 1, cx2 = 1 in defm PVPCNT : RV1m<"pvpcnt", 0xac, V64, VM512>;

// Section 8.11.7 - VBRV (Vector Bit Reverse)
let cx = 0, cx2 = 0 in defm VBRV : RV1m<"vbrv", 0xf7, V64, VM>;
let cx = 0, cx2 = 1 in defm PVBRVLO : RV1m<"pvbrv.lo", 0xf7, V64, VM>;
let cx = 1, cx2 = 0 in defm PVBRVUP : RV1m<"pvbrv.up", 0xf7, V64, VM>;
let cx = 1, cx2 = 1 in defm PVBRV : RV1m<"pvbrv", 0xf7, V64, VM512>;

// Section 8.11.8 - VSEQ (Vector Sequential Number)
let cx = 0, cx2 = 0 in defm VSEQ : RV0m<"vseq", 0x99, V64, VM>;
let cx = 0, cx2 = 1 in defm PVSEQLO : RV0m<"pvseq.lo", 0x99, V64, VM>;
let cx = 1, cx2 = 0 in defm PVSEQUP : RV0m<"pvseq.up", 0x99, V64, VM>;
let cx = 1, cx2 = 1 in defm PVSEQ : RV0m<"pvseq", 0x99, V64, VM512>;
//-----------------------------------------------------------------------------
// Section 8.12 - Vector Shift Operation Instructions
//-----------------------------------------------------------------------------

// Section 8.12.1 - VSLL (Vector Shift Left Logical)
let cx = 0, cx2 = 0 in defm VSLL : RVSm<"vsll", 0xe5, I64, V64, VM>;
let cx = 0, cx2 = 1 in defm PVSLLLO : RVSm<"pvsll.lo", 0xe5, I32, V64, VM>;
let cx = 1, cx2 = 0 in defm PVSLLUP : RVSm<"pvsll.up", 0xe5, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVSLL : RVSm<"pvsll", 0xe5, I64, V64, VM512>;

// Section 8.12.2 - VSLD (Vector Shift Left Double)
defm VSLD : RVSDm<"vsld", 0xe4, V64, VM>;

// Section 8.12.3 - VSRL (Vector Shift Right Logical)
let cx = 0, cx2 = 0 in defm VSRL : RVSm<"vsrl", 0xf5, I64, V64, VM>;
let cx = 0, cx2 = 1 in defm PVSRLLO : RVSm<"pvsrl.lo", 0xf5, I32, V64, VM>;
let cx = 1, cx2 = 0 in defm PVSRLUP : RVSm<"pvsrl.up", 0xf5, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVSRL : RVSm<"pvsrl", 0xf5, I64, V64, VM512>;

// Section 8.12.4 - VSRD (Vector Shift Right Double)
defm VSRD : RVSDm<"vsrd", 0xf4, V64, VM>;

// Section 8.12.5 - VSLA (Vector Shift Left Arithmetic)
let cx = 0, cx2 = 0 in defm VSLAWSX : RVSm<"vsla.w.sx", 0xe6, I32, V64, VM>;
let cx = 0, cx2 = 1 in {
  defm PVSLALO : RVSm<"pvsla.lo", 0xe6, I32, V64, VM>;
  let isCodeGenOnly = 1 in defm VSLAWZX : RVSm<"vsla.w.zx", 0xe6, I32, V64, VM>;
}
let cx = 1, cx2 = 0 in defm PVSLAUP : RVSm<"pvsla.up", 0xe6, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVSLA : RVSm<"pvsla", 0xe6, I64, V64, VM512>;
def : MnemonicAlias<"pvsla.lo.sx", "vsla.w.sx">;
def : MnemonicAlias<"vsla.w.zx", "pvsla.lo">;
def : MnemonicAlias<"vsla.w", "pvsla.lo">;
def : MnemonicAlias<"pvsla.lo.zx", "pvsla.lo">;

// Section 8.12.6 - VSLAX (Vector Shift Left Arithmetic)
defm VSLAL : RVSm<"vsla.l", 0xd4, I64, V64, VM>;

// Section 8.12.7 - VSRA (Vector Shift Right Arithmetic)
let cx = 0, cx2 = 0 in defm VSRAWSX : RVSm<"vsra.w.sx", 0xf6, I32, V64, VM>;
let cx = 0, cx2 = 1 in {
  defm PVSRALO : RVSm<"pvsra.lo", 0xf6, I32, V64, VM>;
  let isCodeGenOnly = 1 in defm VSRAWZX : RVSm<"vsra.w.zx", 0xf6, I32, V64, VM>;
}
let cx = 1, cx2 = 0 in defm PVSRAUP : RVSm<"pvsra.up", 0xf6, F32, V64, VM>;
let cx = 1, cx2 = 1 in defm PVSRA : RVSm<"pvsra", 0xf6, I64, V64, VM512>;
def : MnemonicAlias<"pvsra.lo.sx", "vsra.w.sx">;
def : MnemonicAlias<"vsra.w.zx", "pvsra.lo">;
def : MnemonicAlias<"vsra.w", "pvsra.lo">;
def : MnemonicAlias<"pvsra.lo.zx", "pvsra.lo">;

// Section 8.12.8 - VSRAX (Vector Shift Right Arithmetic)
defm VSRAL : RVSm<"vsra.l", 0xd5, I64, V64, VM>;

// Section 8.12.9 - VSFA (Vector Shift Left and Add)
defm VSFA : RVSAm<"vsfa", 0xd7, V64, VM>;
//-----------------------------------------------------------------------------
// Section 8.13 - Vector Floating-Point Arithmetic Instructions
//-----------------------------------------------------------------------------

// Section 8.13.1 - VFAD (Vector Floating Add)
let cx = 0, cx2 = 0 in
defm VFADDD : RVm<"vfadd.d", 0xcc, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFADDLO : RVm<"pvfadd.lo", 0xcc, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFADDUP : RVm<"pvfadd.up", 0xcc, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFADDS : RVm<"vfadd.s", 0xcc, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFADD : RVm<"pvfadd", 0xcc, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfadd.s", "pvfadd.up">;

// Section 8.13.2 - VFSB (Vector Floating Subtract)
let cx = 0, cx2 = 0 in
defm VFSUBD : RVm<"vfsub.d", 0xdc, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFSUBLO : RVm<"pvfsub.lo", 0xdc, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFSUBUP : RVm<"pvfsub.up", 0xdc, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFSUBS : RVm<"vfsub.s", 0xdc, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFSUB : RVm<"pvfsub", 0xdc, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfsub.s", "pvfsub.up">;
// Section 8.13.3 - VFMP (Vector Floating Multiply)
let cx = 0, cx2 = 0 in
defm VFMULD : RVm<"vfmul.d", 0xcd, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFMULLO : RVm<"pvfmul.lo", 0xcd, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFMULUP : RVm<"pvfmul.up", 0xcd, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFMULS : RVm<"vfmul.s", 0xcd, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFMUL : RVm<"pvfmul", 0xcd, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfmul.s", "pvfmul.up">;

// Section 8.13.4 - VFDV (Vector Floating Divide)
defm VFDIVD : RVDIVm<"vfdiv.d", 0xdd, V64, I64, VM, simm7fp>;
defm VFDIVS : RVDIVm<"vfdiv.s", 0xdd, V64, F32, VM, simm7fp>;

// Section 8.13.5 - VFSQRT (Vector Floating Square Root)
defm VFSQRTD : RVF1m<"vfsqrt.d", 0xed, V64, VM>;
defm VFSQRTS : RVF1m<"vfsqrt.s", 0xed, V64, VM>;

// Section 8.13.6 - VFCP (Vector Floating Compare)
let cx = 0, cx2 = 0 in
defm VFCMPD : RVm<"vfcmp.d", 0xfc, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFCMPLO : RVm<"pvfcmp.lo", 0xfc, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFCMPUP : RVm<"pvfcmp.up", 0xfc, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFCMPS : RVm<"vfcmp.s", 0xfc, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFCMP : RVm<"pvfcmp", 0xfc, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfcmp.s", "pvfcmp.up">;
// Section 8.13.7 - VFCM (Vector Floating Compare and Select Maximum/Minimum)
let cx = 0, cx2 = 0 in
defm VFMAXD : RVm<"vfmax.d", 0xbd, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFMAXLO : RVm<"pvfmax.lo", 0xbd, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFMAXUP : RVm<"pvfmax.up", 0xbd, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFMAXS : RVm<"vfmax.s", 0xbd, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFMAX : RVm<"pvfmax", 0xbd, V64, I64, VM512, simm7fp>;

let cx = 0, cx2 = 0 in
defm VFMIND : RVm<"vfmin.d", 0xbd, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFMINLO : RVm<"pvfmin.lo", 0xbd, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFMINUP : RVm<"pvfmin.up", 0xbd, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFMINS : RVm<"vfmin.s", 0xbd, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFMIN : RVm<"pvfmin", 0xbd, V64, I64, VM512, simm7fp>;

def : MnemonicAlias<"vfmax.s", "pvfmax.up">;
def : MnemonicAlias<"vfmin.s", "pvfmin.up">;
// Section 8.13.8 - VFMAD (Vector Floating Fused Multiply Add)
let cx = 0, cx2 = 0 in
defm VFMADD : RVMm<"vfmad.d", 0xe2, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFMADLO : RVMm<"pvfmad.lo", 0xe2, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFMADUP : RVMm<"pvfmad.up", 0xe2, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFMADS : RVMm<"vfmad.s", 0xe2, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFMAD : RVMm<"pvfmad", 0xe2, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfmad.s", "pvfmad.up">;

// Section 8.13.9 - VFMSB (Vector Floating Fused Multiply Subtract)
let cx = 0, cx2 = 0 in
defm VFMSBD : RVMm<"vfmsb.d", 0xf2, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFMSBLO : RVMm<"pvfmsb.lo", 0xf2, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFMSBUP : RVMm<"pvfmsb.up", 0xf2, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFMSBS : RVMm<"vfmsb.s", 0xf2, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFMSB : RVMm<"pvfmsb", 0xf2, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfmsb.s", "pvfmsb.up">;

// Section 8.13.10 - VFNMAD (Vector Floating Fused Negative Multiply Add)
let cx = 0, cx2 = 0 in
defm VFNMADD : RVMm<"vfnmad.d", 0xe3, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFNMADLO : RVMm<"pvfnmad.lo", 0xe3, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFNMADUP : RVMm<"pvfnmad.up", 0xe3, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFNMADS : RVMm<"vfnmad.s", 0xe3, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFNMAD : RVMm<"pvfnmad", 0xe3, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfnmad.s", "pvfnmad.up">;

// Section 8.13.11 - VFNMSB (Vector Floating Fused Negative Multiply Subtract)
let cx = 0, cx2 = 0 in
defm VFNMSBD : RVMm<"vfnmsb.d", 0xf3, V64, I64, VM, simm7fp>;
let cx = 0, cx2 = 1 in
defm PVFNMSBLO : RVMm<"pvfnmsb.lo", 0xf3, V64, I64, VM, simm7fp>;
let cx = 1, cx2 = 0 in {
  defm PVFNMSBUP : RVMm<"pvfnmsb.up", 0xf3, V64, F32, VM, simm7fp>;
  let isCodeGenOnly = 1 in
  defm VFNMSBS : RVMm<"vfnmsb.s", 0xf3, V64, F32, VM, simm7fp>;
}
let cx = 1, cx2 = 1 in
defm PVFNMSB : RVMm<"pvfnmsb", 0xf3, V64, I64, VM512, simm7fp>;
def : MnemonicAlias<"vfnmsb.s", "pvfnmsb.up">;
// Section 8.13.12 - VRCP (Vector Floating Reciprocal)
let cx = 0, cx2 = 0 in defm VRCPD : RVF1m<"vrcp.d", 0xe1, V64, VM>;
let cx = 0, cx2 = 1 in defm PVRCPLO : RVF1m<"pvrcp.lo", 0xe1, V64, VM>;
let cx = 1, cx2 = 0 in {
  defm PVRCPUP : RVF1m<"pvrcp.up", 0xe1, V64, VM>;
  let isCodeGenOnly = 1 in defm VRCPS : RVF1m<"vrcp.s", 0xe1, V64, VM>;
}
let cx = 1, cx2 = 1 in defm PVRCP : RVF1m<"pvrcp", 0xe1, V64, VM512>;
def : MnemonicAlias<"vrcp.s", "pvrcp.up">;

// Section 8.13.13 - VRSQRT (Vector Floating Reciprocal Square Root)
let cx = 0, cx2 = 0 in defm VRSQRTD : RVF1m<"vrsqrt.d", 0xf1, V64, VM>;
let cx = 0, cx2 = 1 in defm PVRSQRTLO : RVF1m<"pvrsqrt.lo", 0xf1, V64, VM>;
let cx = 1, cx2 = 0 in {
  defm PVRSQRTUP : RVF1m<"pvrsqrt.up", 0xf1, V64, VM>;
  let isCodeGenOnly = 1 in
  defm VRSQRTS : RVF1m<"vrsqrt.s", 0xf1, V64, VM>;
}
let cx = 1, cx2 = 1 in
defm PVRSQRT : RVF1m<"pvrsqrt", 0xf1, V64, VM512>;

let cx = 0, cx2 = 0 in
defm VRSQRTDNEX : RVF1m<"vrsqrt.d.nex", 0xf1, V64, VM>;
let cx = 0, cx2 = 1 in
defm PVRSQRTLONEX : RVF1m<"pvrsqrt.lo.nex", 0xf1, V64, VM>;
let cx = 1, cx2 = 0 in {
  defm PVRSQRTUPNEX : RVF1m<"pvrsqrt.up.nex", 0xf1, V64, VM>;
  let isCodeGenOnly = 1 in
  defm VRSQRTSNEX : RVF1m<"vrsqrt.s.nex", 0xf1, V64, VM>;
}
let cx = 1, cx2 = 1 in
defm PVRSQRTNEX : RVF1m<"pvrsqrt.nex", 0xf1, V64, VM512>;

def : MnemonicAlias<"vrsqrt.s", "pvrsqrt.up">;
def : MnemonicAlias<"vrsqrt.s.nex", "pvrsqrt.up.nex">;
// Section 8.13.14 - VFIX (Vector Convert to Fixed Point)
let cx = 0, cx2 = 0, cs2 = 0 in
defm VCVTWDSX : RVFIXm<"vcvt.w.d.sx", 0xe8, V64, VM>;
let cx = 0, cx2 = 1, cs2 = 0 in
defm VCVTWDZX : RVFIXm<"vcvt.w.d.zx", 0xe8, V64, VM>;
let cx = 1, cx2 = 0, cs2 = 0 in
defm VCVTWSSX : RVFIXm<"vcvt.w.s.sx", 0xe8, V64, VM>;
let cx = 1, cx2 = 1, cs2 = 0 in
defm VCVTWSZX : RVFIXm<"vcvt.w.s.zx", 0xe8, V64, VM>;
let cx = 0, cx2 = 1, cs2 = 1 in
defm PVCVTWSLO : RVFIXm<"pvcvt.w.s.lo", 0xe8, V64, VM>;
let cx = 1, cx2 = 0, cs2 = 1 in
defm PVCVTWSUP : RVFIXm<"pvcvt.w.s.up", 0xe8, V64, VM>;
let cx = 1, cx2 = 1, cs2 = 1 in
defm PVCVTWS : RVFIXm<"pvcvt.w.s", 0xe8, V64, VM512>;

// Section 8.13.15 - VFIXX (Vector Convert to Fixed Point)
defm VCVTLD : RVFIXm<"vcvt.l.d", 0xa8, V64, VM>;

// Section 8.13.16 - VFLT (Vector Convert to Floating Point)
let cx = 0, cx2 = 0, cs2 = 0 in
defm VCVTDW : RVF1m<"vcvt.d.w", 0xf8, V64, VM>;
let cx = 1, cx2 = 0, cs2 = 0 in
defm VCVTSW : RVF1m<"vcvt.s.w", 0xf8, V64, VM>;
let cx = 0, cx2 = 1, cs2 = 1 in
defm PVCVTSWLO : RVF1m<"pvcvt.s.w.lo", 0xf8, V64, VM>;
let cx = 1, cx2 = 0, cs2 = 1 in
defm PVCVTSWUP : RVF1m<"pvcvt.s.w.up", 0xf8, V64, VM>;
let cx = 1, cx2 = 1, cs2 = 1 in
defm PVCVTSW : RVF1m<"pvcvt.s.w", 0xf8, V64, VM512>;

// Section 8.13.17 - VFLTX (Vector Convert to Floating Point)
defm VCVTDL : RVF1m<"vcvt.d.l", 0xb8, V64, VM>;

// Section 8.13.18 - VCVS (Vector Convert to Single-format)
defm VCVTSD : RVF1m<"vcvt.s.d", 0x9f, V64, VM>;

// Section 8.13.19 - VCVD (Vector Convert to Double-format)
defm VCVTDS : RVF1m<"vcvt.d.s", 0x8f, V64, VM>;
//-----------------------------------------------------------------------------
// Section 8.14 - Vector Reduction Instructions
//-----------------------------------------------------------------------------

// Section 8.14.1 - VSUMS (Vector Sum Single)
defm VSUMWSX : RVF1m<"vsum.w.sx", 0xea, V64, VM>;
let cx2 = 1 in defm VSUMWZX : RVF1m<"vsum.w.zx", 0xea, V64, VM>;

// Section 8.14.2 - VSUMX (Vector Sum)
defm VSUML : RVF1m<"vsum.l", 0xaa, V64, VM>;

// Section 8.14.3 - VFSUM (Vector Floating Sum)
defm VFSUMD : RVF1m<"vfsum.d", 0xec, V64, VM>;
let cx = 1 in defm VFSUMS : RVF1m<"vfsum.s", 0xec, V64, VM>;

// Section 8.14.4 - VMAXS (Vector Maximum/Minimum Single)
let cx2 = 0 in defm VRMAXSWFSTSX : RVF1m<"vrmaxs.w.fst.sx", 0xbb, V64, VM>;
let cx2 = 1 in defm VRMAXSWFSTZX : RVF1m<"vrmaxs.w.fst.zx", 0xbb, V64, VM>;

defm VRMAXSWLSTSX : RVF1m<"vrmaxs.w.lst.sx", 0xbb, V64, VM>;
defm VRMAXSWLSTZX : RVF1m<"vrmaxs.w.lst.zx", 0xbb, V64, VM>;

defm VRMINSWFSTSX : RVF1m<"vrmins.w.fst.sx", 0xbb, V64, VM>;
defm VRMINSWFSTZX : RVF1m<"vrmins.w.fst.zx", 0xbb, V64, VM>;

defm VRMINSWLSTSX : RVF1m<"vrmins.w.lst.sx", 0xbb, V64, VM>;
defm VRMINSWLSTZX : RVF1m<"vrmins.w.lst.zx", 0xbb, V64, VM>;

// Section 8.14.5 - VMAXX (Vector Maximum/Minimum)
let cs = 0 in defm VRMAXSLFST : RVF1m<"vrmaxs.l.fst", 0xab, V64, VM>;
let cs = 1 in defm VRMAXSLLST : RVF1m<"vrmaxs.l.lst", 0xab, V64, VM>;

let cs = 0 in defm VRMINSLFST : RVF1m<"vrmins.l.fst", 0xab, V64, VM>;
let cs = 1 in defm VRMINSLLST : RVF1m<"vrmins.l.lst", 0xab, V64, VM>;

// Section 8.14.6 - VFMAX (Vector Floating Maximum/Minimum)
let cs = 0 in defm VFRMAXDFST : RVF1m<"vfrmax.d.fst", 0xad, V64, VM>;
let cs = 1 in defm VFRMAXDLST : RVF1m<"vfrmax.d.lst", 0xad, V64, VM>;

let cs = 0 in defm VFRMINDFST : RVF1m<"vfrmin.d.fst", 0xad, V64, VM>;
let cs = 1 in defm VFRMINDLST : RVF1m<"vfrmin.d.lst", 0xad, V64, VM>;

let cs = 0 in defm VFRMAXSFST : RVF1m<"vfrmax.s.fst", 0xad, V64, VM>;
let cs = 1 in defm VFRMAXSLST : RVF1m<"vfrmax.s.lst", 0xad, V64, VM>;

let cs = 0 in defm VFRMINSFST : RVF1m<"vfrmin.s.fst", 0xad, V64, VM>;
let cs = 1 in defm VFRMINSLST : RVF1m<"vfrmin.s.lst", 0xad, V64, VM>;

// Section 8.14.7 - VRAND (Vector Reduction And)
defm VRAND : RVF1m<"vrand", 0x88, V64, VM>;

// Section 8.14.8 - VROR (Vector Reduction Or)
defm VROR : RVF1m<"vror", 0x98, V64, VM>;

// Section 8.14.9 - VRXOR (Vector Reduction Exclusive Or)
defm VRXOR : RVF1m<"vrxor", 0x89, V64, VM>;
//-----------------------------------------------------------------------------
// Section 8.15 - Vector Iterative Operation Instructions
//-----------------------------------------------------------------------------

// Section 8.15.1 - VFIA (Vector Floating Iteration Add)
let cx = 0 in defm VFIAD : RVI2m<"vfia.d", 0xce, V64, I64>;
let cx = 1 in defm VFIAS : RVI2m<"vfia.s", 0xce, V64, F32>;

// Section 8.15.2 - VFIS (Vector Floating Iteration Subtract)
let cx = 0 in defm VFISD : RVI2m<"vfis.d", 0xde, V64, I64>;
let cx = 1 in defm VFISS : RVI2m<"vfis.s", 0xde, V64, F32>;

// Section 8.15.3 - VFIM (Vector Floating Iteration Multiply)
let cx = 0 in defm VFIMD : RVI2m<"vfim.d", 0xcf, V64, I64>;
let cx = 1 in defm VFIMS : RVI2m<"vfim.s", 0xcf, V64, F32>;

// Section 8.15.4 - VFIAM (Vector Floating Iteration Add and Multiply)
let cx = 0 in defm VFIAMD : RVI3m<"vfiam.d", 0xee, V64, I64>;
let cx = 1 in defm VFIAMS : RVI3m<"vfiam.s", 0xee, V64, F32>;

// Section 8.15.5 - VFISM (Vector Floating Iteration Subtract and Multiply)
let cx = 0 in defm VFISMD : RVI3m<"vfism.d", 0xfe, V64, I64>;
let cx = 1 in defm VFISMS : RVI3m<"vfism.s", 0xfe, V64, F32>;

// Section 8.15.6 - VFIMA (Vector Floating Iteration Multiply and Add)
let cx = 0 in defm VFIMAD : RVI3m<"vfima.d", 0xef, V64, I64>;
let cx = 1 in defm VFIMAS : RVI3m<"vfima.s", 0xef, V64, F32>;

// Section 8.15.7 - VFIMS (Vector Floating Iteration Multiply and Subtract)
let cx = 0 in defm VFIMSD : RVI3m<"vfims.d", 0xff, V64, I64>;
let cx = 1 in defm VFIMSS : RVI3m<"vfims.s", 0xff, V64, F32>;
//-----------------------------------------------------------------------------
// Section 8.16 - Vector Merger Operation Instructions
//-----------------------------------------------------------------------------

// Section 8.16.1 - VMRG (Vector Merge)
let cx = 0 in defm VMRG : RVm<"vmrg", 0xd6, V64, I64, VM>;
// FIXME: vmrg.w should be called pvmrg, but we follow the assembly manual here.
let cx = 1 in defm VMRGW : RVm<"vmrg.w", 0xd6, V64, I64, VM512>;
def : MnemonicAlias<"vmrg.l", "vmrg">;

// Section 8.16.2 - VSHF (Vector Shuffle)
defm VSHF : RVSHFm<"vshf", 0xbc, V64>;

// Section 8.16.3 - VCP (Vector Compress)
defm VCP : RV1m<"vcp", 0x8d, V64, VM>;

// Section 8.16.4 - VEX (Vector Expand)
defm VEX : RV1m<"vex", 0x9d, V64, VM>;
//-----------------------------------------------------------------------------
// Section 8.17 - Vector Mask Operation Instructions
//-----------------------------------------------------------------------------

// Section 8.17.1 - VFMK (Vector Form Mask)
defm VFMKL : RVMKm<"vfmk.l.", 0xb4, V64, VM>;
def : MnemonicAlias<"vfmk.l", "vfmk.l.at">;

// Section 8.17.2 - VFMS (Vector Form Mask Single)
defm VFMKW : RVMKm<"vfmk.w.", 0xb5, V64, VM>;
let isCodeGenOnly = 1 in defm PVFMKWLO : RVMKm<"vfmk.w.", 0xb5, V64, VM>;
let cx = 1 in defm PVFMKWUP : RVMKm<"pvfmk.w.up.", 0xb5, V64, VM>;
def : MnemonicAlias<"vfmk.w", "vfmk.w.at">;
def : MnemonicAlias<"pvfmk.w.up", "pvfmk.w.up.at">;
def : MnemonicAlias<"pvfmk.w.lo", "vfmk.w.at">;
foreach CC = [ "af", "gt", "lt", "ne", "eq", "ge", "le", "at" ] in {
  def : MnemonicAlias<"pvfmk.w.lo."#CC, "vfmk.w."#CC>;
}

// Section 8.17.3 - VFMF (Vector Form Mask Floating Point)
defm VFMKD : RVMKm<"vfmk.d.", 0xb6, V64, VM>;
let cx2 = 1 in defm PVFMKSLO : RVMKm<"pvfmk.s.lo.", 0xb6, V64, VM>;
defm PVFMKSUP : RVMKm<"pvfmk.s.up.", 0xb6, V64, VM>;
let isCodeGenOnly = 1 in defm VFMKS : RVMKm<"vfmk.s.", 0xb6, V64, VM>;
def : MnemonicAlias<"vfmk.d", "vfmk.d.at">;
def : MnemonicAlias<"pvfmk.s.lo", "pvfmk.s.lo.at">;
def : MnemonicAlias<"pvfmk.s.up", "pvfmk.s.up.at">;
def : MnemonicAlias<"vfmk.s", "pvfmk.s.up.at">;
foreach CC = [ "af", "gt", "lt", "ne", "eq", "ge", "le", "at", "num", "nan",
               "gtnan", "ltnan", "nenan", "eqnan", "genan", "lenan" ] in {
  def : MnemonicAlias<"vfmk.s."#CC, "pvfmk.s.up."#CC>;
}
// Section 8.17.4 - ANDM (And VM)
defm ANDM : RVM2m<"andm", 0x84, VM>;

// Section 8.17.5 - ORM (Or VM)
defm ORM : RVM2m<"orm", 0x85, VM>;

// Section 8.17.6 - XORM (Exclusive Or VM)
defm XORM : RVM2m<"xorm", 0x86, VM>;

// Section 8.17.7 - EQVM (Equivalence VM)
defm EQVM : RVM2m<"eqvm", 0x87, VM>;

// Section 8.17.8 - NNDM (Negate And VM)
defm NNDM : RVM2m<"nndm", 0x94, VM>;

// Section 8.17.9 - NEGM (Negate VM)
defm NEGM : RVM1m<"negm", 0x95, VM>;

// Section 8.17.10 - PCVM (Population Count of VM)
defm PCVM : RVMSm<"pcvm", 0xa4, VM>;

// Section 8.17.11 - LZVM (Leading Zero of VM)
defm LZVM : RVMSm<"lzvm", 0xa5, VM>;

// Section 8.17.12 - TOVM (Trailing One of VM)
defm TOVM : RVMSm<"tovm", 0xa6, VM>;
//-----------------------------------------------------------------------------
// Section 8.18 - Vector Control Instructions
//-----------------------------------------------------------------------------

// Section 8.18.1 - LVL (Load VL)
let sx = 0, cz = 0, sz = 0, hasSideEffects = 0, Defs = [VL] in {
  def LVLr : RR<0xbf, (outs), (ins I64:$sy), "lvl $sy">;
  let cy = 0 in def LVLi : RR<0xbf, (outs), (ins simm7:$sy), "lvl $sy">;
}

// Section 8.18.2 - SVL (Save VL)
let cy = 0, sy = 0, cz = 0, sz = 0, hasSideEffects = 0, Uses = [VL] in
def SVL : RR<0x2f, (outs I64:$sx), (ins), "svl $sx">;

// Section 8.18.3 - SMVL (Save Maximum Vector Length)
let cy = 0, sy = 0, cz = 0, sz = 0, hasSideEffects = 0 in
def SMVL : RR<0x2e, (outs I64:$sx), (ins), "smvl $sx">;

// Section 8.18.4 - LVIX (Load Vector Data Index)
let sx = 0, cz = 0, sz = 0, hasSideEffects = 0, Defs = [VIX] in {
  def LVIXr : RR<0xaf, (outs), (ins I64:$sy), "lvix $sy">;
  let cy = 0 in def LVIXi : RR<0xaf, (outs), (ins uimm6:$sy), "lvix $sy">;