// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_DECODER_A64_H_
#define VIXL_A64_DECODER_A64_H_

#include <list>

#include "vixl/globals.h"
#include "vixl/a64/instructions-a64.h"


// List macro containing all visitors needed by the decoder class.

#define VISITOR_LIST_THAT_RETURN(V) \
  V(PCRelAddressing) \
  V(AddSubImmediate) \
  V(LogicalImmediate) \
  V(MoveWideImmediate) \
  V(Bitfield) \
  V(Extract) \
  V(UnconditionalBranch) \
  V(UnconditionalBranchToRegister) \
  V(CompareBranch) \
  V(TestBranch) \
  V(ConditionalBranch) \
  V(System) \
  V(Exception) \
  V(LoadStorePairPostIndex) \
  V(LoadStorePairOffset) \
  V(LoadStorePairPreIndex) \
  V(LoadStorePairNonTemporal) \
  V(LoadLiteral) \
  V(LoadStoreUnscaledOffset) \
  V(LoadStorePostIndex) \
  V(LoadStorePreIndex) \
  V(LoadStoreRegisterOffset) \
  V(LoadStoreUnsignedOffset) \
  V(LoadStoreExclusive) \
  V(LogicalShifted) \
  V(AddSubShifted) \
  V(AddSubExtended) \
  V(AddSubWithCarry) \
  V(ConditionalCompareRegister) \
  V(ConditionalCompareImmediate) \
  V(ConditionalSelect) \
  V(DataProcessing1Source) \
  V(DataProcessing2Source) \
  V(DataProcessing3Source) \
  V(FPCompare) \
  V(FPConditionalCompare) \
  V(FPConditionalSelect) \
  V(FPImmediate) \
  V(FPDataProcessing1Source) \
  V(FPDataProcessing2Source) \
  V(FPDataProcessing3Source) \
  V(FPIntegerConvert) \
  V(FPFixedPointConvert) \
  V(Crypto2RegSHA) \
  V(Crypto3RegSHA) \
  V(CryptoAES) \
  V(NEON2RegMisc) \
  V(NEON3Different) \
  V(NEON3Same) \
  V(NEONAcrossLanes) \
  V(NEONByIndexedElement) \
  V(NEONCopy) \
  V(NEONExtract) \
  V(NEONLoadStoreMultiStruct) \
  V(NEONLoadStoreMultiStructPostIndex) \
  V(NEONLoadStoreSingleStruct) \
  V(NEONLoadStoreSingleStructPostIndex) \
  V(NEONModifiedImmediate) \
  V(NEONScalar2RegMisc) \
  V(NEONScalar3Diff) \
  V(NEONScalar3Same) \
  V(NEONScalarByIndexedElement) \
  V(NEONScalarCopy) \
  V(NEONScalarPairwise) \
  V(NEONScalarShiftImmediate) \
  V(NEONShiftImmediate) \
  V(NEONTable) \
  V(NEONPerm)

#define VISITOR_LIST_THAT_DONT_RETURN(V) \
  V(Unallocated) \
  V(Unimplemented)

#define VISITOR_LIST(V) \
  VISITOR_LIST_THAT_RETURN(V) \
  VISITOR_LIST_THAT_DONT_RETURN(V)
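
// The macros above follow the "X macro" pattern: each entry passes a visitor
// name to a caller-supplied macro V. For example, given
//   #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
// VISITOR_LIST(DECLARE) expands to one declaration per entry, such as
//   virtual void VisitPCRelAddressing(const Instruction* instr) = 0;
//   virtual void VisitAddSubImmediate(const Instruction* instr) = 0;
// and so on. This is how DecoderVisitor and Decoder below declare a Visit
// method for every entry in the list.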

namespace vixl {

// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
 public:
  enum VisitorConstness {
    kConstVisitor,
    kNonConstVisitor
  };
  explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
      : constness_(constness) {}

  virtual ~DecoderVisitor() {}

  #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
  VISITOR_LIST(DECLARE)
  #undef DECLARE

  bool IsConstVisitor() const { return constness_ == kConstVisitor; }
  Instruction* MutableInstruction(const Instruction* instr) {
    VIXL_ASSERT(!IsConstVisitor());
    return const_cast<Instruction*>(instr);
  }

 private:
  const VisitorConstness constness_;
};
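
// Illustration only (not part of the VIXL API): a concrete visitor is
// typically written by expanding VISITOR_LIST with its own macro, so every
// Visit* method is overridden in one place. The class and member names below
// (CountingVisitor, count_, count()) are hypothetical.
//
//   class CountingVisitor : public DecoderVisitor {
//    public:
//     CountingVisitor() : count_(0) {}
//     #define DECLARE(A) virtual void Visit##A(const Instruction* instr) { (void)instr; ++count_; }
//     VISITOR_LIST(DECLARE)
//     #undef DECLARE
//     uint64_t count() const { return count_; }
//    private:
//     uint64_t count_;
//   };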

class Decoder {
 public:
  Decoder() {}

  // Top-level wrappers around the actual decoding function.
  void Decode(const Instruction* instr) {
    std::list<DecoderVisitor*>::iterator it;
    for (it = visitors_.begin(); it != visitors_.end(); it++) {
      VIXL_ASSERT((*it)->IsConstVisitor());
    }
    DecodeInstruction(instr);
  }
  void Decode(Instruction* instr) {
    DecodeInstruction(const_cast<const Instruction*>(instr));
  }

  // Register a new visitor class with the decoder.
  // Decode() will call the corresponding visitor method from all registered
  // visitor classes when decoding reaches the leaf node of the instruction
  // decode tree.
  // Visitors are called in order.
  // A visitor can be registered multiple times.
  //
  //   d.AppendVisitor(V1);
  //   d.AppendVisitor(V2);
  //   d.PrependVisitor(V2);
  //   d.AppendVisitor(V3);
  //
  //   d.Decode(i);
  //
  // will call, in order, the visitor methods of V2, V1, V2, V3.
  void AppendVisitor(DecoderVisitor* visitor);
  void PrependVisitor(DecoderVisitor* visitor);
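
  // A minimal usage sketch (illustration only; any DecoderVisitor
  // implementation can be registered, the VIXL Disassembler is just one
  // example and requires the VIXL disassembler header):
  //
  //   Decoder decoder;
  //   Disassembler disasm;
  //   decoder.AppendVisitor(&disasm);
  //   decoder.Decode(instr);  // instr is a const Instruction* to decode.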

  // These helpers register `new_visitor` before or after the first instance of
  // `registered_visitor` in the list.
  // So if
  //   V1, V2, V1, V2
  // are registered in this order in the decoder, calls to
  //   d.InsertVisitorAfter(V3, V1);
  //   d.InsertVisitorBefore(V4, V2);
  // will yield the order
  //   V1, V3, V4, V2, V1, V2
  //
  // For more complex modifications of the order of registered visitors, one
  // can directly access and modify the list of visitors via the `visitors()`
  // accessor.
  void InsertVisitorBefore(DecoderVisitor* new_visitor,
                           DecoderVisitor* registered_visitor);
  void InsertVisitorAfter(DecoderVisitor* new_visitor,
                          DecoderVisitor* registered_visitor);

  // Remove all instances of a previously registered visitor class from the
  // list of visitors stored by the decoder.
  void RemoveVisitor(DecoderVisitor* visitor);

  #define DECLARE(A) void Visit##A(const Instruction* instr);
  VISITOR_LIST(DECLARE)
  #undef DECLARE

  std::list<DecoderVisitor*>* visitors() { return &visitors_; }
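
  // For example (illustration only; `decoder` and `visitor` are hypothetical
  // names), a registered visitor can be moved to the front of the list:
  //
  //   std::list<DecoderVisitor*>* list = decoder.visitors();
  //   list->remove(visitor);
  //   list->push_front(visitor);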

 private:
  // Decodes an instruction and calls the visitor functions registered with the
  // Decoder class.
  void DecodeInstruction(const Instruction* instr);

  // Decode the PC relative addressing instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x0.
  void DecodePCRelAddressing(const Instruction* instr);

  // Decode the add/subtract immediate instruction, and call the corresponding
  // visitors.
  // On entry, instruction bits 27:24 = 0x1.
  void DecodeAddSubImmediate(const Instruction* instr);

  // Decode the branch, system command, and exception generation parts of
  // the instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
  void DecodeBranchSystemException(const Instruction* instr);

  // Decode the load and store parts of the instruction tree, and call
  // the corresponding visitors.
  // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
  void DecodeLoadStore(const Instruction* instr);

  // Decode the logical immediate and move wide immediate parts of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x2.
  void DecodeLogical(const Instruction* instr);

  // Decode the bitfield and extraction parts of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 27:24 = 0x3.
  void DecodeBitfieldExtract(const Instruction* instr);

  // Decode the data processing parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
  void DecodeDataProcessing(const Instruction* instr);

  // Decode the floating point parts of the instruction tree, and call the
  // corresponding visitors.
  // On entry, instruction bits 27:24 = {0xE, 0xF}.
  void DecodeFP(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 29:25 = 0x6.
  void DecodeNEONLoadStore(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) vector data processing part of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 28:25 = 0x7.
  void DecodeNEONVectorDataProcessing(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) scalar data processing part of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 28:25 = 0xF.
  void DecodeNEONScalarDataProcessing(const Instruction* instr);

 private:
  // Visitors are registered in a list.
  std::list<DecoderVisitor*> visitors_;
};

}  // namespace vixl

#endif  // VIXL_A64_DECODER_A64_H_