1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine.].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2008
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Adobe AS3 Team
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
50 using namespace ActionBlockConstants
;
52 VarScopeCtx
* Ctx::findVarScope()
55 while (ctx
->tag
!= CTX_Function
&& ctx
->tag
!= CTX_ClassMethod
&& ctx
->tag
!= CTX_Program
)
57 return (VarScopeCtx
*)ctx
;
// Table mapping binary-operator (OPR_*) values to AVM2 opcodes.
// Each entry is {invert, opcode}: 'invert' == 1 means the result of the
// emitted opcode must be logically negated (e.g. OPR_notEqual is emitted
// as OP_equals followed by a not).  {0, 0} entries mark operators that
// are handled specially elsewhere (assignment, logical && and ||).
// NOTE(review): this copy of the file is missing original lines 65-66
// and 75, so one or more table entries are absent; the entry order must
// match the OPR_* enum exactly — restore from upstream before building.
60 const Cogen::BinopMapping
Cogen::binopMapping
[] = {
62 {0, OP_astypelate
}, // OPR_as
63 {0, OP_add
}, // OPR_plus,
64 {0, 0}, // OPR_assign,
// NOTE(review): entries from original lines 65-66 are missing here.
67 {0, OP_subtract
}, // OPR_minus,
68 {0, OP_multiply
}, // OPR_multiply,
69 {0, OP_divide
}, // OPR_divide,
70 {0, OP_modulo
}, // OPR_remainder,
71 {0, OP_lshift
}, // OPR_leftShift,
72 {0, OP_rshift
}, // OPR_rightShift,
73 {0, OP_urshift
}, // OPR_rightShiftUnsigned,
74 {0, OP_instanceof
}, // OPR_instanceof
// NOTE(review): the entry from original line 75 (likely OPR_in) is missing here.
76 {0, OP_istypelate
}, // OPR_is
77 {0, OP_bitand
}, // OPR_bitwiseAnd,
78 {0, OP_bitor
}, // OPR_bitwiseOr,
79 {0, OP_bitxor
}, // OPR_bitwiseXor,
80 {0, 0}, // OPR_logicalAnd,
81 {0, 0}, // OPR_logicalOr,
82 {0, OP_lessthan
}, // OPR_less,
83 {0, OP_lessequals
}, // OPR_lessOrEqual,
84 {0, OP_greaterthan
}, // OPR_greater,
85 {0, OP_greaterequals
}, // OPR_greaterOrEqual,
86 {0, OP_equals
}, // OPR_equal,
87 {1, OP_equals
}, // OPR_notEqual,
88 {0, OP_strictequals
}, // OPR_strictEqual,
89 {1, OP_strictequals
}, // OPR_strictNotEqual
// Cogen constructor: captures the compilation context (the Compiler, the
// ABCFile being built, the traits table, and the enclosing method body)
// and initializes per-method code-generation state.  Temporaries are
// handed out upward starting at first_temp (see temp_counter below).
// Visible initializers: allocator/code/labels all draw from the
// compiler's allocator; need_activation starts false.
// NOTE(review): several initializer-list entries (original lines 93-94,
// 98-101, 103) and the constructor body are missing from this copy —
// restore from upstream before building.
92 Cogen::Cogen(Compiler
*compiler
, ABCFile
* abc
, ABCTraitsTable
* traits
, ABCMethodBodyInfo
* body
, uint32_t first_temp
)
95 , allocator(compiler
->allocator
)
96 , code(compiler
->allocator
)
97 , labels(compiler
->allocator
)
102 , temp_counter(first_temp
)
104 , need_activation(false)
111 uint8_t* Cogen::serializeCodeBytes(uint8_t* b
) const
115 return b
+ code
.size();
/* Stack height tracking is "simplest possible": we assume that even for
 * unconditional branches the stack height in the taken branch and in the
 * untaken (impossible) branch are the same; this means that anyone jumping
 * to a label following a jump must be careful to match the stack height.
 * This does not seem to be an issue in practice; the verifier keeps us
 * honest; and we don't need to track the expected stack height at every
 * label.  Also, we're not interested in the actual stack height everywhere,
 * just a conservative approximation to it.
 */
127 void Cogen::stackMovement(AbcOpcode opcode
)
129 stack_depth
= stack_depth
+ opcodeInfo
[opcode
].stack
;
130 AvmAssert((int32_t)stack_depth
>= 0);
131 if (stack_depth
> max_stack_depth
)
132 max_stack_depth
= stack_depth
;
135 void Cogen::stackMovement(AbcOpcode opcode
, bool hasRTNS
, bool hasRTName
, uint32_t pops
)
137 stack_depth
= stack_depth
+ opcodeInfo
[opcode
].stack
- hasRTNS
- hasRTName
- pops
;
138 AvmAssert((int32_t)stack_depth
>= 0);
139 if (stack_depth
> max_stack_depth
)
140 max_stack_depth
= stack_depth
;
143 void Cogen::emitOp(AbcOpcode opcode
)
145 code
.emitU8((uint8_t)opcode
);
146 stackMovement(opcode
);
149 void Cogen::emitOpU30(AbcOpcode opcode
, uint32_t u30
)
151 code
.emitU8((uint8_t)opcode
);
153 stackMovement(opcode
);
156 void Cogen::emitOpU30Special(AbcOpcode opcode
, uint32_t u30
, uint32_t pops
)
158 code
.emitU8((uint8_t)opcode
);
160 stackMovement(opcode
, false, false, pops
);
163 void Cogen::emitOpU30U30(AbcOpcode opcode
, uint32_t u30_1
, uint32_t u30_2
)
165 code
.emitU8((uint8_t)opcode
);
168 stackMovement(opcode
);
171 void Cogen::emitOpU8(AbcOpcode opcode
, uint8_t b
)
173 code
.emitU8((uint8_t)opcode
);
175 stackMovement(opcode
);
178 void Cogen::emitOpS8(AbcOpcode opcode
, int8_t b
)
180 code
.emitU8((uint8_t)opcode
);
182 stackMovement(opcode
);
185 void Cogen::I_getlocal(uint32_t index
) {
187 emitOp((AbcOpcode
)(OP_getlocal0
+ index
));
189 emitOpU30(OP_getlocal
, index
);
192 void Cogen::I_setlocal(uint32_t index
) {
194 emitOp((AbcOpcode
)(OP_setlocal0
+ index
));
196 emitOpU30(OP_setlocal
, index
);
199 void Cogen::I_debugfile(uint32_t index
)
201 if (compiler
->debugging
)
202 emitOpU30(OP_debugfile
, index
);
205 void Cogen::I_debugline(uint32_t linenum
)
207 if (compiler
->debugging
&& linenum
> last_linenum
)
209 last_linenum
= linenum
;
210 emitOpU30(OP_debugline
, linenum
);
214 void Cogen::callMN(AbcOpcode opcode
, uint32_t index
, uint32_t nargs
) {
215 code
.emitU8((uint8_t)opcode
);
218 stackMovement(opcode
, abc
->hasRTNS(index
), abc
->hasRTName(index
), nargs
);
221 void Cogen::propU30(AbcOpcode opcode
, uint32_t index
)
223 code
.emitU8((uint8_t)opcode
);
225 stackMovement(opcode
, abc
->hasRTNS(index
), abc
->hasRTName(index
), 0);
228 Label
* Cogen::newLabel()
230 Label
* l
= ALLOC(Label
, ());
235 void Cogen::emitJump(AbcOpcode opcode
, Label
* label
)
237 // OPTIMIZEME: don't need to register backpatches for branches to known labels.
238 code
.emitU8((uint8_t)opcode
);
240 stackMovement(opcode
);
241 label
->backpatches
= ALLOC(Seq
<uint32_t>, (code
.size() - 3, label
->backpatches
));
244 void Cogen::I_label(Label
* label
)
246 AvmAssert(label
->address
== ~0U);
247 label
->address
= code
.size();
248 code
.emitU8((uint8_t)OP_label
);
// The location to be patched must contain a signed adjustment that will be
// added to the offset value. For regular jump instructions this should be '3',
// because the jump is relative to the end of the instruction - 3 bytes after
// the address of the offset field. For lookupswitch it is a value that depends
// on the location within the lookupswitch instruction of the offset word,
// because the jump is relative to the start of the instruction.
258 void Cogen::fixupBackpatches(uint8_t* b
) const
260 for ( Seq
<Label
*>* labels
= this->labels
.get() ; labels
!= NULL
; labels
= labels
->tl
) {
261 uint32_t addr
= labels
->hd
->address
;
262 bool backward
= false;
263 AvmAssert(addr
!= ~0U);
264 for ( Seq
<uint32_t>* backpatches
= labels
->hd
->backpatches
; backpatches
!= NULL
; backpatches
= backpatches
->tl
) {
265 uint32_t loc
= backpatches
->hd
;
266 int32_t adjustment
= readS24(b
+ loc
);
267 int32_t offset
= (int32_t)(addr
- (loc
+ adjustment
));
268 backward
= backward
|| offset
< 0;
269 emitS24(b
+ loc
, offset
);
272 // Work around verifier bug: if a branch to this label is never a backward
273 // branch then replace OP_label with OP_nop. The verifier always assumes
274 // that OP_label is the target of a backward branch.
280 uint32_t Cogen::emitException(uint32_t from
, uint32_t to
, uint32_t target
, uint32_t type
, uint32_t name_index
)
282 return body
->exceptions
.addAtEnd(ALLOC(ABCExceptionInfo
, (from
, to
, target
, type
, name_index
)));
285 uint32_t Cogen::emitTypeName(Compiler
* compiler
, QualifiedName
* t
)
287 ABCFile
* abc
= &compiler
->abc
;
290 uint32_t ns
= compiler
->NS_public
;
291 if (t
->qualifier
!= NULL
) {
292 AvmAssert(t
->qualifier
->tag() == TAG_simpleName
);
293 ns
= abc
->addNamespace(CONSTANT_Namespace
, abc
->addString(((SimpleName
*)t
->qualifier
)->name
));
295 AvmAssert(t
->name
->tag() == TAG_simpleName
);
296 return abc
->addQName(ns
, abc
->addString(((SimpleName
*)t
->name
)->name
));
299 void Cogen::I_lookupswitch(Label
* default_label
, Label
** case_labels
, uint32_t ncases
)
301 AvmAssert( ncases
> 0 );
302 AvmAssert( default_label
!= NULL
);
303 // AvmAssert( forall c in case_labels c != NULL );
305 uint32_t here
= code
.size();
306 code
.emitU8((uint8_t)OP_lookupswitch
);
307 code
.emitS24((int32_t)(here
- code
.size()));
308 default_label
->backpatches
= ALLOC(Seq
<uint32_t>, (code
.size() - 3, default_label
->backpatches
));
309 code
.emitU30(ncases
- 1);
310 for ( uint32_t i
=0 ; i
< ncases
; i
++ ) {
311 Label
* label
= case_labels
[i
];
312 code
.emitS24((int32_t)(here
- code
.size()));
313 label
->backpatches
= ALLOC(Seq
<uint32_t>, (code
.size() - 3, label
->backpatches
));
315 stackMovement(OP_lookupswitch
);
// Generate the ABCMethodInfo and ABCMethodBodyInfo for one function
// definition: collect parameter types and constant default values,
// allocate the method info / traits / body structures, generate the body
// code, and set the method flags (NEED_ARGUMENTS / NEED_REST, and
// SETS_DXNS) on the resulting info record.
// NOTE(review): this copy is missing many original lines (including 319,
// 324, 326, 334-336, 338-348 partially, 354-357, 359-361, 363, 365-366,
// 373-374, 376-377, 381, 383-386, 390, 393-394) — notably the switch
// header over the default value's tag and several case/break lines.  It
// will not compile as-is; restore from upstream before building.
318 void FunctionDefn::cogenGuts(Compiler
* compiler
, Ctx
* ctx
, ABCMethodInfo
** info
, ABCMethodBodyInfo
** body
)
320 Allocator
* allocator
= compiler
->allocator
;
321 ABCFile
* abc
= &compiler
->abc
;
322 ABCTraitsTable
* traits
;
// Anonymous functions get the compiler's canonical "anonymous" symbol
// as their name (the guard for name == NULL is among the missing lines).
323 Str
* name
= this->name
;
325 name
= compiler
->SYM_anonymous
;
327 SeqBuilder
<uint32_t> param_types(allocator
);
328 SeqBuilder
<DefaultValue
*> default_values(allocator
);
329 uint32_t numdefaults
= 0;
// Per-parameter loop: intern each parameter's type name; parameters with
// default values must have compile-time-constant defaults (see the
// EXTENDME note below for why arbitrary expressions are rejected).
330 for ( Seq
<FunctionParam
*>* params
= this->params
; params
!= NULL
; params
= params
->tl
) {
331 param_types
.addAtEnd(Cogen::emitTypeName(compiler
, params
->hd
->type_name
));
332 if (params
->hd
->default_value
!= NULL
) {
333 Expr
* dv
= params
->hd
->default_value
;
// Switch over dv's literal tag (the switch header itself is among the
// missing lines); each arm records a constant-pool kind/value pair.
337 case TAG_literalString
:
339 cv
= abc
->addString(((LiteralString
*)dv
)->value
);
341 case TAG_literalUInt
:
343 cv
= abc
->addUInt(((LiteralUInt
*)dv
)->value
);
347 cv
= abc
->addInt(((LiteralInt
*)dv
)->value
);
349 case TAG_literalDouble
:
350 case TAG_literalBoolean
:
351 ct
= CONSTANT_Double
;
352 cv
= abc
->addDouble(((LiteralDouble
*)dv
)->value
);
353 if (((LiteralBoolean
*)dv
)->value
)
358 case TAG_literalNull
:
362 // EXTENDME: we can sort-of support arbitrary default values here if we want to.
364 // AS3 does not support default value other than the six cases above. Doing better
367 // We can use one of the obscure namespace default values as a placeholder, then
368 // generate code to test for that value and compute the correct default value.
369 // But the signature of the function won't be right; the type of the argument
370 // must be '*'. May be close enough, as long as we assign a provided argument
371 // value to a typed slot and get a type check on entry.
372 compiler
->syntaxError(params
->hd
->default_value
->pos
, SYNTAXERR_IMPOSSIBLE_DEFAULT
);
375 default_values
.addAtEnd(ALLOC(DefaultValue
, (ct
, cv
)));
// Build the method info (name, parameter/default lists, return type),
// its traits table, and its body; local register 0 is 'this', hence the
// 1 + numparams (+1 more when arguments/rest is present).
378 *info
= ALLOC(ABCMethodInfo
, (compiler
, abc
->addString(name
), numparams
, param_types
.get(), numdefaults
, default_values
.get(), Cogen::emitTypeName(compiler
, return_type_name
)));
379 traits
= ALLOC(ABCTraitsTable
, (compiler
));
380 *body
= ALLOC(ABCMethodBodyInfo
, (compiler
, *info
, traits
, 1 + numparams
+ (uses_arguments
|| (rest_param
!= NULL
))));
382 cogen(&(*body
)->cogen
, ctx
);
// 'arguments' and a rest parameter are mutually exclusive; set the
// corresponding ABC method flags, plus SETS_DXNS when applicable.
385 AvmAssert( !(uses_arguments
&& (rest_param
!= NULL
)) );
387 flags
|= abcMethod_NEED_ARGUMENTS
;
388 if (rest_param
!= NULL
)
389 flags
|= abcMethod_NEED_REST
;
391 flags
|= abcMethod_SETS_DXNS
;
392 (*info
)->setFlags((uint8_t)((*body
)->getFlags() | flags
));
// Generate the prologue and body for one code block (a program or a
// function): set up the scope chain, allocate an activation object when
// the function has hoisted bindings or uses 'arguments', emit slot/const
// traits for bindings, initialize namespace definitions, copy parameters
// (and 'arguments'/rest) into the activation, instantiate nested
// function definitions, then generate the statement body via cogenBody.
// NOTE(review): this copy is missing many original lines (braces and
// control-flow lines such as 396, 401, 404, 406, 410-411, 415, 418-419,
// 426, 428, 431-432, 434-436, 439, 441, 447, 450-451, 453, 464-469,
// 477, 484, 486-488, 498-499, 501) — it will not compile as-is; restore
// from upstream before building.
395 void CodeBlock::cogen(Cogen
* cogen
, Ctx
* ctx
)
397 Compiler
* compiler
= cogen
->compiler
;
398 ABCFile
* abc
= cogen
->abc
;
399 uint32_t activation
= 0; // 0 means "unallocated"
400 FunctionDefn
* fn
= NULL
;
402 if (tag
== CODE_Function
)
403 fn
= (FunctionDefn
*)this;
405 cogen
->I_debugfile(cogen
->emitString(compiler
->str_filename
));
// Program prologue: push the global object onto the scope stack.
407 if (tag
== CODE_Program
) {
408 cogen
->I_getlocal(0);
409 cogen
->I_pushscope();
// Functions with hoisted bindings or 'arguments' need an activation
// object, stored in a temp and pushed onto the scope stack.
412 if (fn
&& (fn
->bindings
!= NULL
|| fn
->uses_arguments
)) {
413 activation
= cogen
->getTemp();
414 cogen
->I_newactivation();
416 cogen
->I_setlocal(activation
);
417 cogen
->I_pushscope();
// Emit one trait per hoisted binding; kind decides slot vs const.
420 for ( Seq
<Binding
*>* bindings
= this->bindings
; bindings
!= NULL
; bindings
= bindings
->tl
) {
421 Binding
* b
= bindings
->hd
;
422 uint32_t id
= abc
->addQName(compiler
->NS_public
, cogen
->emitString(b
->name
));
423 uint32_t type_id
= cogen
->emitTypeName(compiler
, b
->type_name
);
424 switch (bindings
->hd
->kind
) {
425 case TAG_namespaceBinding
: // FIXME: namespace bindings should be const, but the VM does not allow TAG_constBinding
427 cogen
->emitSlotTrait(id
, type_id
);
429 case TAG_constBinding
:
430 cogen
->emitConstTrait(id
, type_id
);
433 compiler
->internalError(0, "Unknown binding tag");
// Initialize each 'namespace' definition: anonymous namespaces get a
// fresh interned name; string-valued ones become explicit namespaces.
437 for ( Seq
<NamespaceDefn
*>* namespaces
= this->namespaces
; namespaces
!= NULL
; namespaces
= namespaces
->tl
) {
438 uint32_t id
= abc
->addQName(compiler
->NS_public
, cogen
->emitString(namespaces
->hd
->name
));
440 Expr
* value
= namespaces
->hd
->value
;
442 ns
= abc
->addNamespace(CONSTANT_Namespace
, cogen
->emitString(compiler
->intern(compiler
->namespace_counter
++)));
443 else if (value
->tag() == TAG_literalString
)
444 ns
= abc
->addNamespace(CONSTANT_ExplicitNamespace
, cogen
->emitString(((LiteralString
*)value
)->value
));
445 if (tag
== CODE_Program
)
446 cogen
->I_getlocal(0);
448 AvmAssert(activation
!= 0);
449 cogen
->I_getlocal(activation
);
452 cogen
->I_pushnamespace(ns
);
454 // FIXME: semantic check for namespaces.
455 // Check that the name on the RHS is actually a ns
456 // Code is the same as for 'use default namespace'
457 // If we can't tell (name may be shadowed?) then
458 // emit code that checks at run-time. If we can tell,
459 // then don't emit code for looking it up at run-time
460 // here, but just reference the definition of the other
461 // binding? (That's an optimization.)
462 //value->cogen(cogen);
463 compiler
->internalError(0, "Namespace should have been resolved before code generation");
465 cogen
->I_initproperty(id
);
// Copy incoming parameter registers into the activation's named
// properties (parameters occupy locals 1..numparams).
470 Seq
<FunctionParam
*>* params
=fn
->params
;
471 for ( ; params
!= NULL
; params
= params
->tl
, i
++ ) {
472 uint32_t id
= abc
->addQName(compiler
->NS_public
, cogen
->emitString(params
->hd
->name
));
473 AvmAssert(activation
!= 0);
474 cogen
->I_getlocal(activation
);
475 cogen
->I_getlocal(i
);
476 cogen
->I_setproperty(id
);
// The register after the parameters holds either 'arguments' or the
// rest array; store it under the appropriate property name.
478 if (fn
->uses_arguments
|| fn
->rest_param
) {
479 AvmAssert(activation
!= 0);
480 cogen
->I_getlocal(activation
);
481 cogen
->I_getlocal(i
);
482 if (fn
->uses_arguments
)
483 cogen
->I_setproperty(abc
->addQName(compiler
->NS_public
, cogen
->emitString(compiler
->SYM_arguments
)));
485 cogen
->I_setproperty(abc
->addQName(compiler
->NS_public
, cogen
->emitString(fn
->rest_param
->name
)));
// Instantiate nested function definitions: generate each one's method,
// create a closure with newfunction, and bind it by name.
489 for ( Seq
<FunctionDefn
*>* functions
= this->functions
; functions
!= NULL
; functions
= functions
->tl
) {
490 FunctionDefn
* func
= functions
->hd
;
491 ABCMethodInfo
* fn_info
;
492 ABCMethodBodyInfo
* fn_body
;
493 func
->cogenGuts(compiler
, ctx
, &fn_info
, &fn_body
);
494 uint32_t fname
= abc
->addQName(compiler
->NS_public
, cogen
->emitString(func
->name
));
495 cogen
->I_getlocal(activation
);
496 cogen
->I_newfunction(fn_info
->index
);
497 cogen
->I_setproperty(fname
);
500 cogenBody(cogen
, ctx
, activation
);
503 uint32_t Cogen::buildNssetWithPublic(Seq
<Namespace
*>* ns
)
505 SeqBuilder
<uint32_t> s(allocator
);
506 s
.addAtEnd(compiler
->NS_public
);
508 if (ns
->hd
->tag() != TAG_commonNamespace
)
509 compiler
->internalError(0, "Namespace should have been resolved before now.");
510 CommonNamespace
* cns
= (CommonNamespace
*)ns
->hd
;
511 s
.addAtEnd(abc
->addNamespace(CONSTANT_Namespace
, abc
->addString(cns
->name
)));
514 return abc
->addNsset(s
.get());