//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 1.0.
//
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}

class VTypeIOp<int VTypeINum> : RISCVOp {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
  let OperandType = "OPERAND_VTYPEI" # VTypeINum;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<VTypeINum>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
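
// Note: the two widths match the vtype immediate fields of the two setvl
// forms: vsetivli encodes a 10-bit vtype immediate and vsetvli an 11-bit one;
// the VSETIVLI/VSETVLI definitions below use VTypeIOp10 and VTypeIOp11
// accordingly.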

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : RISCVSImmLeafOp<5> {
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

def simm5_plus1 : RISCVOp, ImmLeaf<XLenVT,
                                   [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let OperandType = "OPERAND_SIMM5_PLUS1";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
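
// Note: simm5_plus1 accepts [-15, 16], i.e. the simm5 range shifted up by
// one. It is used by the vmsge{u}.vi pseudoinstructions below, which the
// assembler expands by decrementing the immediate back into plain simm5
// range.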

//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//

// Common class of scheduling definitions.
// `ReadVMergeOp` will be prepended to reads if the instruction is masked.
// `ReadVMask` will be appended to reads if the instruction is masked.
// Parameters:
// `writes` SchedWrites that are listed for each explicit def operand.
// `reads` SchedReads that are listed for each explicit use operand.
// `forceMasked` Force the instruction to be masked (e.g. Add-with-Carry Instructions).
// `forceMergeOpRead` Force a read for the merge operand.
class SchedCommon<list<SchedWrite> writes, list<SchedRead> reads,
                  string mx = "WorstCase", int sew = 0, bit forceMasked = 0,
                  bit forceMergeOpRead = 0> : Sched<[]> {
  defvar isMasked = !ne(!find(NAME, "_MASK"), -1);
  defvar isMaskedOrForceMasked = !or(forceMasked, isMasked);
  defvar mergeRead = !if(!or(!eq(mx, "WorstCase"), !eq(sew, 0)),
                         !cast<SchedRead>("ReadVMergeOp_" # mx),
                         !cast<SchedRead>("ReadVMergeOp_" # mx # "_E" # sew));
  defvar needsMergeRead = !or(isMaskedOrForceMasked, forceMergeOpRead);
  defvar readsWithMask =
      !if(isMaskedOrForceMasked, !listconcat(reads, [ReadVMask]), reads);
  defvar allReads =
      !if(needsMergeRead, !listconcat([mergeRead], readsWithMask), reads);
  let SchedRW = !listconcat(writes, allReads);
}
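
// For illustration: a masked binary pseudo at MX=M1 passing
// writes=[WriteVIALUV_M1] and reads=[ReadVIALUV_M1, ReadVIALUV_M1] resolves
// to SchedRW = [WriteVIALUV_M1, ReadVMergeOp_M1, ReadVIALUV_M1,
// ReadVIALUV_M1, ReadVMask]: the merge-operand read is prepended and the
// mask read appended.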

// Common class of scheduling definitions for n-ary instructions.
// The scheduling resources are relevant to LMUL and may be relevant to SEW.
class SchedNary<string write, list<string> reads, string mx, int sew = 0,
                bit forceMasked = 0, bit forceMergeOpRead = 0>
    : SchedCommon<[!cast<SchedWrite>(
                      !if(sew,
                          write # "_" # mx # "_E" # sew,
                          write # "_" # mx))],
                  !foreach(read, reads,
                           !cast<SchedRead>(!if(sew, read #"_" #mx #"_E" #sew,
                                                read #"_" #mx))),
                  mx, sew, forceMasked, forceMergeOpRead>;
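
// E.g. SchedNary<"WriteVIALUV", ["ReadVIALUV", "ReadVIALUV"], "M2"> resolves
// the resources to WriteVIALUV_M2 / ReadVIALUV_M2, while a nonzero sew (say
// 32) would select WriteVIALUV_M2_E32 / ReadVIALUV_M2_E32 instead.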

// Classes with postfix "MC" are only used in the MC layer.
// For these classes, we assume that they have worst-case costs and that
// `ReadVMask` is always needed (with some exceptions).

// For instructions with no operand.
class SchedNullary<string write, string mx, int sew = 0, bit forceMasked = 0,
                   bit forceMergeOpRead = 0>:
  SchedNary<write, [], mx, sew, forceMasked, forceMergeOpRead>;
class SchedNullaryMC<string write, bit forceMasked = 1>:
  SchedNullary<write, "WorstCase", forceMasked=forceMasked>;

// For instructions with one operand.
class SchedUnary<string write, string read0, string mx, int sew = 0,
                 bit forceMasked = 0, bit forceMergeOpRead = 0>:
  SchedNary<write, [read0], mx, sew, forceMasked, forceMergeOpRead>;
class SchedUnaryMC<string write, string read0, bit forceMasked = 1>:
  SchedUnary<write, read0, "WorstCase", forceMasked=forceMasked>;

// For instructions with two operands.
class SchedBinary<string write, string read0, string read1, string mx,
                  int sew = 0, bit forceMasked = 0, bit forceMergeOpRead = 0>
    : SchedNary<write, [read0, read1], mx, sew, forceMasked, forceMergeOpRead>;
class SchedBinaryMC<string write, string read0, string read1,
                    bit forceMasked = 1>:
  SchedBinary<write, read0, read1, "WorstCase", forceMasked=forceMasked>;

// For instructions with three operands.
class SchedTernary<string write, string read0, string read1, string read2,
                   string mx, int sew = 0, bit forceMasked = 0,
                   bit forceMergeOpRead = 0>
    : SchedNary<write, [read0, read1, read2], mx, sew, forceMasked,
                forceMergeOpRead>;
class SchedTernaryMC<string write, string read0, string read1, string read2,
                     int sew = 0, bit forceMasked = 1>:
  SchedNary<write, [read0, read1, read2], "WorstCase", sew, forceMasked>;

// For reduction instructions.
class SchedReduction<string write, string read, string mx, int sew,
                     bit forceMergeOpRead = 0>
    : SchedCommon<[!cast<SchedWrite>(write #"_" #mx #"_E" #sew)],
                  !listsplat(!cast<SchedRead>(read), 3), mx, sew,
                  forceMergeOpRead=forceMergeOpRead>;
class SchedReductionMC<string write, string readV, string readV0>:
  SchedCommon<[!cast<SchedWrite>(write # "_WorstCase")],
              [!cast<SchedRead>(readV), !cast<SchedRead>(readV0)],
              forceMasked=1>;

// Whole Vector Register Move
class VMVRSched<int n> : SchedCommon<
  [!cast<SchedWrite>("WriteVMov" # n # "V")],
  [!cast<SchedRead>("ReadVMov" # n # "V")]
>;

// Vector Unit-Stride Loads and Stores
class VLESched<string lmul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLDE_" # lmul)],
  [ReadVLDX], mx=lmul, forceMasked=forceMasked
>;
class VLESchedMC : VLESched<"WorstCase", forceMasked=1>;

class VSESched<string lmul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVSTE_" # lmul)],
  [!cast<SchedRead>("ReadVSTEV_" # lmul), ReadVSTX], mx=lmul,
  forceMasked=forceMasked
>;
class VSESchedMC : VSESched<"WorstCase", forceMasked=1>;

// Vector Strided Loads and Stores
class VLSSched<int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLDS" # eew # "_" # emul)],
  [ReadVLDX, ReadVLDSX], emul, eew, forceMasked
>;
class VLSSchedMC<int eew> : VLSSched<eew, "WorstCase", forceMasked=1>;

class VSSSched<int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVSTS" # eew # "_" # emul)],
  [!cast<SchedRead>("ReadVSTS" # eew # "V_" # emul), ReadVSTX, ReadVSTSX],
  emul, eew, forceMasked
>;
class VSSSchedMC<int eew> : VSSSched<eew, "WorstCase", forceMasked=1>;

// Vector Indexed Loads and Stores
class VLXSched<int dataEEW, bit isOrdered, string dataEMUL, string idxEMUL,
               bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLD" # !if(isOrdered, "O", "U") # "X" # dataEEW # "_" # dataEMUL)],
  [ReadVLDX, !cast<SchedRead>("ReadVLD" # !if(isOrdered, "O", "U") # "XV_" # idxEMUL)],
  dataEMUL, dataEEW, forceMasked
>;
class VLXSchedMC<int dataEEW, bit isOrdered>:
  VLXSched<dataEEW, isOrdered, "WorstCase", "WorstCase", forceMasked=1>;

class VSXSched<int dataEEW, bit isOrdered, string dataEMUL, string idxEMUL,
               bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVST" # !if(isOrdered, "O", "U") # "X" # dataEEW # "_" # dataEMUL)],
  [!cast<SchedRead>("ReadVST" # !if(isOrdered, "O", "U") #"X" # dataEEW # "_" # dataEMUL),
   ReadVSTX, !cast<SchedRead>("ReadVST" # !if(isOrdered, "O", "U") # "XV_" # idxEMUL)],
  dataEMUL, dataEEW, forceMasked
>;
class VSXSchedMC<int dataEEW, bit isOrdered>:
  VSXSched<dataEEW, isOrdered, "WorstCase", "WorstCase", forceMasked=1>;

// Unit-stride Fault-Only-First Loads
class VLFSched<string lmul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLDFF_" # lmul)],
  [ReadVLDX], mx=lmul, forceMasked=forceMasked
>;
class VLFSchedMC: VLFSched<"WorstCase", forceMasked=1>;

// Unit-Stride Segment Loads and Stores
class VLSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLSEG" #nf #"e" #eew #"_" #emul)],
  [ReadVLDX], emul, eew, forceMasked
>;
class VLSEGSchedMC<int nf, int eew> : VLSEGSched<nf, eew, "WorstCase",
                                                 forceMasked=1>;

class VSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVSSEG" # nf # "e" # eew # "_" # emul)],
  [!cast<SchedRead>("ReadVSTEV_" #emul), ReadVSTX], emul, eew, forceMasked
>;
class VSSEGSchedMC<int nf, int eew> : VSSEGSched<nf, eew, "WorstCase",
                                                 forceMasked=1>;

class VLSEGFFSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLSEGFF" # nf # "e" # eew # "_" # emul)],
  [ReadVLDX], emul, eew, forceMasked
>;
class VLSEGFFSchedMC<int nf, int eew> : VLSEGFFSched<nf, eew, "WorstCase",
                                                     forceMasked=1>;

// Strided Segment Loads and Stores
class VLSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVLSSEG" #nf #"e" #eew #"_" #emul)],
  [ReadVLDX, ReadVLDSX], emul, eew, forceMasked
>;
class VLSSEGSchedMC<int nf, int eew> : VLSSEGSched<nf, eew, "WorstCase",
                                                   forceMasked=1>;

class VSSSEGSched<int nf, int eew, string emul, bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVSSSEG" #nf #"e" #eew #"_" #emul)],
  [!cast<SchedRead>("ReadVSTS" #eew #"V_" #emul),
   ReadVSTX, ReadVSTSX], emul, eew, forceMasked
>;
class VSSSEGSchedMC<int nf, int eew> : VSSSEGSched<nf, eew, "WorstCase",
                                                   forceMasked=1>;

// Indexed Segment Loads and Stores
class VLXSEGSched<int nf, int eew, bit isOrdered, string emul,
                  bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVL" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)],
  [ReadVLDX, !cast<SchedRead>("ReadVLD" #!if(isOrdered, "O", "U") #"XV_" #emul)],
  emul, eew, forceMasked
>;
class VLXSEGSchedMC<int nf, int eew, bit isOrdered>:
  VLXSEGSched<nf, eew, isOrdered, "WorstCase", forceMasked=1>;

// Passes sew=0 instead of eew since this pseudo does not follow the MX_E form.
class VSXSEGSched<int nf, int eew, bit isOrdered, string emul,
                  bit forceMasked = 0> : SchedCommon<
  [!cast<SchedWrite>("WriteVS" #!if(isOrdered, "O", "U") #"XSEG" #nf #"e" #eew #"_" #emul)],
  [!cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"X" #eew #"_" #emul),
   ReadVSTX, !cast<SchedRead>("ReadVST" #!if(isOrdered, "O", "U") #"XV_" #emul)],
  emul, sew=0, forceMasked=forceMasked
>;
class VSXSEGSchedMC<int nf, int eew, bit isOrdered>:
  VSXSEGSched<nf, eew, isOrdered, "WorstCase", forceMasked=1>;
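
// E.g. VLXSched<32, /*isOrdered=*/1, "M1", "M2"> selects WriteVLDOX32_M1 for
// the write and [ReadVLDX, ReadVLDOXV_M2] for the reads: the data access is
// keyed on dataEMUL and the index access on idxEMUL.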

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPRMemZeroOffset:$rs1),
                opcodestr, "$vd, $rs1"> {
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1), opcodestr, "$vd, $rs1">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $rs2$vm">;

// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $vs2$vm">;

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr, "$vd, ${rs1}$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, $rs1, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0

let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, ${rs1}$vm">;

let vm = 1, RVVConstraint = NoConstraint in {
// vs<nf>r.v vd, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPRMemZeroOffset:$rs1),
                opcodestr, "$vs3, $rs1"> {
  let Uses = [];
}

// unit-stride mask store vd, vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1), opcodestr,
                "$vs3, $rs1">;
} // vm = 1, RVVConstraint = NoConstraint

// strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $rs2$vm">;

// indexed store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $vs2$vm">;

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPRMemZeroOffset:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, ${rs1}$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPRMemZeroOffset:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, $rs1, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1, VMV0:$v0),
               opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr,
              bit EarlyClobber = 0>
    : RVInstVV<funct6, opv, (outs VR:$vd_wb),
               (ins VR:$vd, VR:$vs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs1, $vs2$vm"> {
  let Constraints = !if(EarlyClobber, "@earlyclobber $vd_wb, $vd = $vd_wb",
                                      "$vd = $vd_wb");
}

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
               opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr,
              bit EarlyClobber = 0>
    : RVInstVX<funct6, opv, (outs VR:$vd_wb),
               (ins VR:$vd, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $rs1, $vs2$vm"> {
  let Constraints = !if(EarlyClobber, "@earlyclobber $vd_wb, $vd = $vd_wb",
                                      "$vd = $vd_wb");
}

// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr,
              bit EarlyClobber = 0>
    : RVInstVX<funct6, opv, (outs VR:$vd_wb),
               (ins VR:$vd, FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $rs1, $vs2$vm"> {
  let Constraints = !if(EarlyClobber, "@earlyclobber $vd_wb, $vd = $vd_wb",
                                      "$vd = $vd_wb");
}

// op vd, vs2, vm (use vs1 as instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
              (ins VR:$vs2, VMaskOp:$vm),
              opcodestr, "$vd, $vs2$vm">;

// op vd, vs2 (use vs1 as instruction encoding)
class VALUVs2NoVm<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
              (ins VR:$vs2), opcodestr,
              "$vd, $vs2"> {
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
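
// Note on VALUVs2/VALUVs2NoVm: the five-bit vs1 field carries a sub-opcode
// rather than a register index, so families such as vzext/vsext and the
// vfcvt conversions below define several instructions under one funct6 by
// varying vs1.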

//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//

multiclass VIndexLoadStore<int eew> {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  def VLUXEI # eew # _V :
    VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # eew # ".v">,
    VLXSchedMC<eew, isOrdered=0>;
  def VLOXEI # eew # _V :
    VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # eew # ".v">,
    VLXSchedMC<eew, isOrdered=1>;

  def VSUXEI # eew # _V :
    VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # eew # ".v">,
    VSXSchedMC<eew, isOrdered=0>;
  def VSOXEI # eew # _V :
    VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # eew # ".v">,
    VSXSchedMC<eew, isOrdered=1>;
}

multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV">;
}

multiclass VALU_IV_X<string opcodestr, bits<6> funct6> {
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX">;
}

multiclass VALU_IV_I<string opcodestr, bits<6> funct6> {
  def I : VALUVI<funct6, opcodestr # ".vi", simm5>,
          SchedUnaryMC<"WriteVIALUI", "ReadVIALUV">;
}

multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_V<opcodestr, funct6>,
      VALU_IV_X<opcodestr, funct6>,
      VALU_IV_I<opcodestr, funct6>;
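
// For example, `defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;` below expands
// to the three instructions VADD_VV, VADD_VX and VADD_VI ("vadd.vv",
// "vadd.vx", "vadd.vi"), each annotated with the matching WriteVIALU*
// scheduling class.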

multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6>
    : VALU_IV_V<opcodestr, funct6>,
      VALU_IV_X<opcodestr, funct6>;

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6>
    : VALU_IV_X<opcodestr, funct6>,
      VALU_IV_I<opcodestr, funct6>;

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          SchedBinaryMC<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          SchedBinaryMC<"WriteVIWALUX", "ReadVIWALUV", "ReadVIWALUX">;
}

multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv">,
          SchedTernaryMC<"WriteVIMulAddV", "ReadVIMulAddV", "ReadVIMulAddV",
                         "ReadVIMulAddV">;
  def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedTernaryMC<"WriteVIMulAddX", "ReadVIMulAddV", "ReadVIMulAddX",
                         "ReadVIMulAddV">;
}

multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6> {
  let RVVConstraint = WidenV in
  def X : VALUrVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedTernaryMC<"WriteVIWMulAddX", "ReadVIWMulAddV", "ReadVIWMulAddX",
                         "ReadVIWMulAddV">;
}

multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6>
    : VWMAC_MV_X<opcodestr, funct6> {
  let RVVConstraint = WidenV in
  def V : VALUrVV<funct6, OPMVV, opcodestr # ".vv", EarlyClobber=1>,
          SchedTernaryMC<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV",
                         "ReadVIWMulAddV">;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           SchedUnaryMC<"WriteVExtV", "ReadVExtV">;
}

multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           SchedBinaryMC<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           SchedBinaryMC<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX">;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           SchedUnaryMC<"WriteVIMergeI", "ReadVIMergeV">;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           SchedBinaryMC<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           SchedBinaryMC<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX">;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALUm_IV_V_X<opcodestr, funct6> {
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           SchedUnaryMC<"WriteVICALUI", "ReadVICALUV">;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV",
                        forceMasked=0>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVICALUX", "ReadVICALUV", "ReadVICALUX",
                        forceMasked=0>;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VALUNoVm_IV_V_X<opcodestr, funct6> {
  def I : VALUVINoVm<funct6, opcodestr # ".vi", simm5>,
          SchedUnaryMC<"WriteVICALUI", "ReadVICALUV", forceMasked=0>;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF">;
}

multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6>
    : VALU_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV">;
}

multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          SchedBinaryMC<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV">;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          SchedBinaryMC<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF">;
}

multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV">;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF">;
}

multiclass VDIV_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF">;
}

multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6>
    : VDIV_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV">;
}

multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV">;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF">;
}

multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedTernaryMC<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV",
                         "ReadVFMulAddV">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedTernaryMC<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF",
                         "ReadVFMulAddV">;
}

multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6> {
  let RVVConstraint = WidenV in {
  def V : VALUrVV<funct6, OPFVV, opcodestr # ".vv", EarlyClobber=1>,
          SchedTernaryMC<"WriteVFWMulAddV", "ReadVFWMulAddV", "ReadVFWMulAddV",
                         "ReadVFWMulAddV">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # ".vf", EarlyClobber=1>,
          SchedTernaryMC<"WriteVFWMulAddF", "ReadVFWMulAddV", "ReadVFWMulAddF",
                         "ReadVFWMulAddV">;
  }
}

multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFSqrtV", "ReadVFSqrtV">;
}

multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFRecpV", "ReadVFRecpV">;
}

multiclass VMINMAX_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV">;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF">;
}

multiclass VCMP_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFCmpF", "ReadVFCmpV", "ReadVFCmpF">;
}

multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6>
    : VCMP_FV_F<opcodestr, funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFCmpV", "ReadVFCmpV", "ReadVFCmpV">;
}

multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPFVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV">;
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF">;
}

multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFClassV", "ReadVFClassV">;
}

multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFCvtIToFV", "ReadVFCvtIToFV">;
}

multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFCvtFToIV", "ReadVFCvtFToIV">;
}

multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV">;
}

multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFWCvtFToIV", "ReadVFWCvtFToIV">;
}

multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV">;
}

multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFNCvtIToFV", "ReadVFNCvtIToFV">;
}

multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFNCvtFToIV", "ReadVFNCvtFToIV">;
}

multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           SchedUnaryMC<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV">;
}

multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVIRedV_From", "ReadVIRedV", "ReadVIRedV0">;
}

multiclass VREDMINMAX_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVIRedMinMaxV_From", "ReadVIRedV", "ReadVIRedV0">;
}

multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVIWRedV_From", "ReadVIWRedV", "ReadVIWRedV0">;
}

multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVFRedV_From", "ReadVFRedV", "ReadVFRedV0">;
}

multiclass VREDMINMAX_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVFRedMinMaxV_From", "ReadVFRedV", "ReadVFRedV0">;
}

multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVFRedOV_From", "ReadVFRedOV", "ReadVFRedOV0">;
}

multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVFWRedV_From", "ReadVFWRedV", "ReadVFWRedV0">;
}

multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            SchedReductionMC<"WriteVFWRedOV_From", "ReadVFWRedOV", "ReadVFWRedOV0">;
}

multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr #"." #vm #"m">,
          SchedBinaryMC<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV",
                        forceMasked=0>;
}

multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           SchedUnaryMC<"WriteVMSFSV", "ReadVMSFSV">;
}

multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           SchedUnaryMC<"WriteVMIotV", "ReadVMIotV">;
}

multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVShiftV", "ReadVShiftV", "ReadVShiftV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX">;
  def I : VALUVI<funct6, opcodestr # ".vi", uimm5>,
          SchedUnaryMC<"WriteVShiftI", "ReadVShiftV">;
}

multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".wv">,
          SchedBinaryMC<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".wx">,
          SchedBinaryMC<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX">;
  def I : VALUVI<funct6, opcodestr # ".wi", uimm5>,
          SchedUnaryMC<"WriteVNShiftI", "ReadVNShiftV">;
}

multiclass VMINMAX_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVIMinMaxV", "ReadVIMinMaxV", "ReadVIMinMaxV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVIMinMaxX", "ReadVIMinMaxV", "ReadVIMinMaxX">;
}

multiclass VCMP_IV_V<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV">;
}

multiclass VCMP_IV_X<string opcodestr, bits<6> funct6> {
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX">;
}

multiclass VCMP_IV_I<string opcodestr, bits<6> funct6> {
  def I : VALUVI<funct6, opcodestr # ".vi", simm5>,
          SchedUnaryMC<"WriteVICmpI", "ReadVICmpV">;
}

multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VCMP_IV_V<opcodestr, funct6>,
      VCMP_IV_X<opcodestr, funct6>,
      VCMP_IV_I<opcodestr, funct6>;

multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6>
    : VCMP_IV_X<opcodestr, funct6>,
      VCMP_IV_I<opcodestr, funct6>;

multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6>
    : VCMP_IV_V<opcodestr, funct6>,
      VCMP_IV_X<opcodestr, funct6>;

multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVIMulV", "ReadVIMulV", "ReadVIMulV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVIMulX", "ReadVIMulV", "ReadVIMulX">;
}

multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVIWMulX", "ReadVIWMulV", "ReadVIWMulX">;
}

multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVIDivV", "ReadVIDivV", "ReadVIDivV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVIDivX", "ReadVIDivV", "ReadVIDivX">;
}

multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVSALUV", "ReadVSALUV", "ReadVSALUV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX">;
}

multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6>
    : VSALU_IV_V_X<opcodestr, funct6> {
  def I : VALUVI<funct6, opcodestr # ".vi", simm5>,
          SchedUnaryMC<"WriteVSALUI", "ReadVSALUV">;
}

multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPMVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVAALUV", "ReadVAALUV", "ReadVAALUV">;
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVAALUX", "ReadVAALUV", "ReadVAALUX">;
}

multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVSMulV", "ReadVSMulV", "ReadVSMulV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVSMulX", "ReadVSMulV", "ReadVSMulX">;
}

multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVSShiftV", "ReadVSShiftV", "ReadVSShiftV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX">;
  def I : VALUVI<funct6, opcodestr # ".vi", uimm5>,
          SchedUnaryMC<"WriteVSShiftI", "ReadVSShiftV">;
}

multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".wv">,
          SchedBinaryMC<"WriteVNClipV", "ReadVNClipV", "ReadVNClipV">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".wx">,
          SchedBinaryMC<"WriteVNClipX", "ReadVNClipV", "ReadVNClipX">;
  def I : VALUVI<funct6, opcodestr # ".wi", uimm5>,
          SchedUnaryMC<"WriteVNClipI", "ReadVNClipV">;
}

multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6> {
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVISlideX", "ReadVISlideV", "ReadVISlideX">;
  def I : VALUVI<funct6, opcodestr # ".vi", uimm5>,
          SchedUnaryMC<"WriteVISlideI", "ReadVISlideV">;
}

multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6> {
  def X : VALUVX<funct6, OPMVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVISlide1X", "ReadVISlideV", "ReadVISlideX">;
}

multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6> {
  def F : VALUVF<funct6, OPFVF, opcodestr # ".vf">,
          SchedBinaryMC<"WriteVFSlide1F", "ReadVFSlideV", "ReadVFSlideF">;
}

multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def V : VALUVV<funct6, OPIVV, opcodestr # ".vv">,
          SchedBinaryMC<"WriteVRGatherVV", "ReadVRGatherVV_data",
                        "ReadVRGatherVV_index">;
  def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
          SchedBinaryMC<"WriteVRGatherVX", "ReadVRGatherVX_data",
                        "ReadVRGatherVX_index">;
  def I : VALUVI<funct6, opcodestr # ".vi", uimm5>,
          SchedUnaryMC<"WriteVRGatherVI", "ReadVRGatherVI_data">;
}

multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
          SchedBinaryMC<"WriteVCompressV", "ReadVCompressV", "ReadVCompressV">;
}

multiclass VWholeLoadN<int l, bits<3> nf, string opcodestr, RegisterClass VRC> {
  defvar w = !cast<RISCVWidth>("LSWidth" # l);
  defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R");

  def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                   Sched<[s, ReadVLDX]>;
}
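
// E.g. `defm VL1R : VWholeLoadN<8, 0, "vl1r", VR>;` (instantiated in the
// foreach below) yields VL1RE8_V, i.e. "vl1re8.v", scheduled on WriteVLD1R;
// nf encodes the register count minus one.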

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasVInstructions] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">,
              Sched<[WriteVSETVLI, ReadVSETVLI]>;
def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">,
               Sched<[WriteVSETIVLI]>;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">,
             Sched<[WriteVSETVL, ReadVSETVL, ReadVSETVL]>;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
} // Predicates = [HasVInstructions]
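
// Typical assembly for these (illustrative):
//   vsetvli t0, a0, e32, m2, ta, ma
//   vsetivli t0, 4, e8, m1, ta, ma
// The comma-separated vtype string is parsed by parseVTypeI into the single
// vtypei immediate operand.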

foreach eew = [8, 16, 32, 64] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  let Predicates = !if(!eq(eew, 64), [HasVInstructionsI64],
                       [HasVInstructions]) in {
  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESchedMC;
  def VSE#eew#_V : VUnitStrideStore<w, "vse"#eew#".v">, VSESchedMC;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w, "vle"#eew#"ff.v">, VLFSchedMC;

  // Vector Strided Instructions
  def VLSE#eew#_V : VStridedLoad<w, "vlse"#eew#".v">, VLSSchedMC<eew>;
  def VSSE#eew#_V : VStridedStore<w, "vsse"#eew#".v">, VSSSchedMC<eew>;

  defm VL1R : VWholeLoadN<eew, 0, "vl1r", VR>;
  defm VL2R : VWholeLoadN<eew, 1, "vl2r", VRM2>;
  defm VL4R : VWholeLoadN<eew, 3, "vl4r", VRM4>;
  defm VL8R : VWholeLoadN<eew, 7, "vl8r", VRM8>;
  }

  let Predicates = !if(!eq(eew, 64), [IsRV64, HasVInstructionsI64],
                       [HasVInstructions]) in
  defm "" : VIndexLoadStore<eew>;
}

let Predicates = [HasVInstructions] in {
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
            Sched<[WriteVLDM_WorstCase, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
            Sched<[WriteVSTM_WorstCase, ReadVSTM_WorstCase, ReadVSTX]>;
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;

def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;

def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000, "v">;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010, "v">;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001, "v">;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011, "v">;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"
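
// E.g. at LMUL=1, `vwadd.vv v4, v6, v7` writes the EMUL=2 group v4-v5; the
// earlyclobber constraint keeps register allocation from assigning the
// destination group on top of the narrower sources v6 and v7 (illustrative).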
1133 def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
1134 (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
1135 def : InstAlias<"vwcvt.x.x.v $vd, $vs",
1136 (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
1137 def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
1138 (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
1139 def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
1140 (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
1142 // Vector Integer Extension
1143 defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
1144 defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
1145 defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
1146 defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
1147 defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
1148 defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100>;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101>;
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint
1204 def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
1205 (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
1206 def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
1207 (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
1208 def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
1209 (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
1210 def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
1211 (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case the 0 immediate: simply
// decrementing the immediate, as we do for signed comparisons, would invert
// the always true/false semantics. To match the GNU assembler we use
// vmseq/vmsne.vv with the same register for both operands, which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// assembler.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}

// Vector Integer Min/Max Instructions
defm VMINU_V : VMINMAX_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VMINMAX_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VMINMAX_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VMINMAX_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;

// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              SchedUnaryMC<"WriteVIMovV", "ReadVIMovV", forceMasked=0>;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              SchedUnaryMC<"WriteVIMovX", "ReadVIMovX", forceMasked=0>;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                        (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              SchedNullaryMC<"WriteVIMovI", forceMasked=0>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110>;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]

let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
} // Uses = [FRM], mayRaiseFPException = true

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd",
    Uses = [FRM],
    mayRaiseFPException = true in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000, "v">;
defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010, "v">;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true

// Vector Single-Width Floating-Point Multiply/Divide Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VDIV_FV_F<"vfrdiv", 0b100001>;
} // Uses = [FRM], mayRaiseFPException = true

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
} // Uses = [FRM], mayRaiseFPException = true

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Uses = [FRM], mayRaiseFPException = true
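// Informative note on the naming: the *macc/*msac forms accumulate into vd,
// e.g. vfmacc.vv computes vd[i] = +(vs1[i] * vs2[i]) + vd[i], while the
// *madd/*msub forms multiply by vd, e.g. vfmadd.vv computes
// vd[i] = +(vs1[i] * vd[i]) + vs2[i].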
// Vector Floating-Point Square-Root Instruction
let Uses = [FRM], mayRaiseFPException = true in {
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
} // Uses = [FRM], mayRaiseFPException = true

let mayRaiseFPException = true in
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
// Vector Floating-Point MIN/MAX Instructions
let mayRaiseFPException = true in {
defm VFMIN_V : VMINMAX_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VMINMAX_FV_V_F<"vfmax", 0b000110>;
} // mayRaiseFPException = true
// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;

def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfneg.v $vd, $vs",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint, mayRaiseFPException = true
def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;
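// Informative note: like scalar fclass, "vfclass.v vd, vs2, vm" writes a
// 10-bit one-hot class mask per element (infinity, normal, subnormal and
// zero of either sign, plus signaling/quiet NaN).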
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// Vector Floating-Point Merge Instruction
let vm = 0 in
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  SchedBinaryMC<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF">;
// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                        (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               SchedUnaryMC<"WriteVFMovV", "ReadVFMovF", forceMasked=0>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Single-Width Floating-Point/Integer Type-Convert Instructions
let mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
} // Uses = [FRM]
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
let Uses = [FRM] in {
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;
} // Uses = [FRM]
} // mayRaiseFPException = true
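// Informative note: the unsuffixed conversions round per the dynamic frm
// CSR (hence Uses = [FRM] above), while the .rtz variants encode a static
// round-toward-zero and therefore do not read frm.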
// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt,
    mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
} // Uses = [FRM]
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt
// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", mayRaiseFPException = true in {
let Uses = [FRM] in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
} // Uses = [FRM]
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
let Uses = [FRM] in {
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
} // Uses = [FRM]
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd", mayRaiseFPException = true
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {

// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VREDMINMAX_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VREDMINMAX_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VREDMINMAX_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VREDMINMAX_MV_V<"vredmin", 0b000101>;
defm VREDAND : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint
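// Informative note: reductions take the scalar accumulator from element 0
// of vs1 and write the scalar result to element 0 of vd, e.g.
// "vredsum.vs vd, vs2, vs1, vm" computes vd[0] = vs1[0] + sum(vs2[*]).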
// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
let Uses = [FRM], mayRaiseFPException = true in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
} // Uses = [FRM], mayRaiseFPException = true
let mayRaiseFPException = true in {
defm VFREDMAX : VREDMINMAX_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VREDMINMAX_FV_V<"vfredmin", 0b000101>;
} // mayRaiseFPException = true
} // RVVConstraint = NoConstraint
def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
} // Uses = [FRM], mayRaiseFPException = true
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
} // RVVConstraint = NoConstraint
def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {

// Vector mask population count vcpop
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              SchedUnaryMC<"WriteVMPopV", "ReadVMPopV">;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
               SchedUnaryMC<"WriteVMFFSV", "ReadVMFFSV">;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;
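// Informative note: "vcpop.m rd, vs2, vm" writes the number of active set
// mask bits to rd, and "vfirst.m rd, vs2, vm" writes the index of the
// lowest set bit, or -1 if no bit is set (hence the GPR destinations above).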
let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;
// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            SchedNullaryMC<"WriteVMIdxV">;
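// Informative note: "vid.v vd, vm" writes each element's own index,
// vd[i] = i; vs2 is hardwired to 0 in the encoding, as set above.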
// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                       (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
} // vm = 1, RVVConstraint = NoConstraint

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                       (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF, ReadVFMovVF]>;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                        (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1

} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
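// Informative note: "vslideup.vi vd, vs2, 2, vm" computes vd[i+2] = vs2[i]
// and leaves vd[0..1] unchanged (hence the SlideUp overlap constraint),
// while vslide1up/vslide1down shift by one element and insert the scalar
// rs1 at the vacated end.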
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      SchedBinaryMC<"WriteVRGatherVV", "ReadVRGatherVV_data",
                                    "ReadVRGatherVV_index">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather
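// Informative note: "vrgather.vv vd, vs2, vs1, vm" computes
// vd[i] = (vs1[i] < VLMAX) ? vs2[vs1[i]] : 0; vrgatherei16.vv reads its
// indices at a fixed EEW of 16 regardless of SEW.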
// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress
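// Informative note: "vcompress.vm vd, vs2, vs1" packs those elements of
// vs2 whose mask bit in vs1 is set into the low elements of vd, preserving
// their relative order.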
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1,
    RVVConstraint = NoConstraint in {
// A future extension may relax the vector register alignment restrictions.
foreach n = [1, 2, 4, 8] in {
  defvar vrc = !cast<VReg>(!if(!eq(n, 1), "VR", "VRM"#n));
  def VMV#n#R_V : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
                          (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
                  VMVRSched<n> {
    let Uses = [];
    let vm = 1;
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, isMoveReg = 1
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructions] in {
  foreach nf = 2-8 in {
    foreach eew = [8, 16, 32] in {
      defvar w = !cast<RISCVWidth>("LSWidth"#eew);

      // Vector Unit-strided Segment Instructions
      def VLSEG#nf#E#eew#_V :
        VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">,
        VLSEGSchedMC<nf, eew>;
      def VLSEG#nf#E#eew#FF_V :
        VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">,
        VLSEGFFSchedMC<nf, eew>;
      def VSSEG#nf#E#eew#_V :
        VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">,
        VSSEGSchedMC<nf, eew>;
      // Vector Strided Instructions
      def VLSSEG#nf#E#eew#_V :
        VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">,
        VLSSEGSchedMC<nf, eew>;
      def VSSSEG#nf#E#eew#_V :
        VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">,
        VSSSEGSchedMC<nf, eew>;

      // Vector Indexed Instructions
      def VLUXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                            "vluxseg"#nf#"ei"#eew#".v">,
        VLXSEGSchedMC<nf, eew, isOrdered=0>;
      def VLOXSEG#nf#EI#eew#_V :
        VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                            "vloxseg"#nf#"ei"#eew#".v">,
        VLXSEGSchedMC<nf, eew, isOrdered=1>;
      def VSUXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                             "vsuxseg"#nf#"ei"#eew#".v">,
        VSXSEGSchedMC<nf, eew, isOrdered=0>;
      def VSOXSEG#nf#EI#eew#_V :
        VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                             "vsoxseg"#nf#"ei"#eew#".v">,
        VSXSEGSchedMC<nf, eew, isOrdered=1>;
    } // foreach eew
  } // foreach nf
} // Predicates = [HasVInstructions]
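// Informative note: segment accesses move nf fields per element, e.g.
// "vlseg2e32.v v8, (a0), vm" de-interleaves pairs of 32-bit values so that
// field 0 lands in v8[i] and field 1 in v9[i].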
let Predicates = [HasVInstructionsI64] in {
  foreach nf = 2-8 in {
    // Vector Unit-strided Segment Instructions
    def VLSEG#nf#E64_V :
      VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">,
      VLSEGSchedMC<nf, 64>;
    def VLSEG#nf#E64FF_V :
      VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">,
      VLSEGFFSchedMC<nf, 64>;
    def VSSEG#nf#E64_V :
      VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">,
      VSSEGSchedMC<nf, 64>;

    // Vector Strided Segment Instructions
    def VLSSEG#nf#E64_V :
      VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">,
      VLSSEGSchedMC<nf, 64>;
    def VSSSEG#nf#E64_V :
      VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">,
      VSSSEGSchedMC<nf, 64>;
  } // foreach nf
} // Predicates = [HasVInstructionsI64]
let Predicates = [HasVInstructionsI64, IsRV64] in {
  foreach nf = 2-8 in {
    // Vector Indexed Segment Instructions
    def VLUXSEG#nf#EI64_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
                          "vluxseg"#nf#"ei64.v">,
      VLXSEGSchedMC<nf, 64, isOrdered=0>;
    def VLOXSEG#nf#EI64_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
                          "vloxseg"#nf#"ei64.v">,
      VLXSEGSchedMC<nf, 64, isOrdered=1>;
    def VSUXSEG#nf#EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
                           "vsuxseg"#nf#"ei64.v">,
      VSXSEGSchedMC<nf, 64, isOrdered=0>;
    def VSOXSEG#nf#EI64_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
                           "vsoxseg"#nf#"ei64.v">,
      VSXSEGSchedMC<nf, 64, isOrdered=1>;
  } // foreach nf
} // Predicates = [HasVInstructionsI64, IsRV64]
include "RISCVInstrInfoZvfbf.td"
include "RISCVInstrInfoVPseudos.td"