1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
16 * The Original Code is [Open Source Virtual Machine.].
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2010
21 * the Initial Developer. All Rights Reserved.
26 * Alternatively, the contents of this file may be used under the terms of
27 * either the GNU General Public License Version 2 or later (the "GPL"), or
28 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
41 #include "MethodInfo.h"
45 #include "CdeclThunk.h"
49 // stores arbitrary Avm values without allocation
53 enum { _tagBits
= 3 };
54 double _d
__attribute__((aligned(16)));
64 return kDoubleType
| (Atom
)dblPtr();
73 Atom
get(Toplevel
* toplevel
, Traits
* t
)
75 AvmCore
* core
= toplevel
->core();
77 BuiltinType bt
= Traits::getBuiltinType(t
);
79 if ((bt
== BUILTIN_any
|| bt
== BUILTIN_object
) && _a
== dblAtom())
80 _a
= core
->doubleToAtom(*dblPtr());
82 // should not be in here if we want a void return type.
83 AvmAssert(bt
!= BUILTIN_void
);
90 return (Atom
)AvmCore::integer(_a
);
92 return (Atom
)AvmCore::toUInt32(_a
);
94 return (Atom
)AvmCore::boolean(_a
);
96 return (Atom
)core
->coerce_s(_a
);
97 case BUILTIN_namespace
:
98 if (atomKind(_a
) == kNamespaceType
)
99 return (Atom
)core
->atomToNamespace(_a
);
100 AvmAssert(AvmCore::isNullOrUndefined(_a
));
103 return AvmCore::isNullOrUndefined(_a
) ? nullObjectAtom
: _a
;
105 AvmAssert(t
!= NUMBER_TYPE
); // use getDouble
106 return (Atom
)AvmCore::atomToScriptObject(toplevel
->coerce(_a
, t
));
112 return AvmCore::number(_a
);
115 void set(Atom a
, Traits
* t
)
120 switch(Traits::getBuiltinType(t
))
123 AvmAssert(a
== AtomConstants::undefinedAtom
);
124 _a
= AtomConstants::undefinedAtom
;
130 setUint((uintptr_t)a
);
132 case BUILTIN_boolean
:
133 _a
= a
? trueAtom
: falseAtom
;
136 _a
= a
? ((Stringp
)a
)->atom() : nullStringAtom
;
138 case BUILTIN_namespace
:
139 _a
= a
? ((Namespace
*)a
)->atom() : nullNsAtom
;
142 if (atomKind(a
) == kUnusedAtomTag
)
143 _a
= a
? ((ScriptObject
*)a
)->atom() : nullObjectAtom
;
148 AvmAssert(Traits::getBuiltinType(t
) != BUILTIN_number
);
149 _a
= a
? ((ScriptObject
*)a
)->atom() : nullObjectAtom
;
// Stores a signed integer into the AvmValue as a tagged intptr Atom:
// shift left by _tagBits, and if shifting back reproduces the original
// value (i.e. no significant bits were lost to the tag field), OR in the
// kIntptrType tag. NOTE(review): the overflow fallback path (presumably
// boxing the value as a double) is not visible in this extraction.
154 void setInt(intptr_t i
)
156 intptr_t iwt
= i
<< _tagBits
;
157 if ((iwt
>> _tagBits
) == i
) {
158 _a
= (iwt
| kIntptrType
);
// Stores an unsigned integer as a tagged intptr Atom. The mask test checks
// whether any of the top (_tagBits + 1) bits are set — the extra bit accounts
// for the sign position, since tagged intptr Atoms are signed. The shift by 3
// matches _tagBits (declared as 3 earlier in this file). NOTE(review): the
// branch taken when the value does NOT fit (the "if" body vs. the tagging
// line) is ambiguous here because intervening lines are missing — confirm
// against the full source.
165 void setUint(uintptr_t u
)
167 if (u
& (~(static_cast<uintptr_t>(-1) >> (_tagBits
+ 1)))) {
171 _a
= (u
<< 3) | kIntptrType
;
175 void setDouble(double d
)
182 enum // we try to stick to 4 bits here instead of the 5 in BUILTIN_xxx
// Maps a compact 4-bit argument-type kind (kOBJECT .. kVECTOROBJ, defined by
// the enum above that "sticks to 4 bits instead of the 5 in BUILTIN_xxx") to
// the corresponding Traits* constant. kANY maps to NULL, meaning the untyped
// "*" / any type. Falls through to an AvmAssert(false) for unknown kinds.
// NOTE(review): the switch header and default/return lines are missing from
// this extraction; only the case arms are visible.
202 static Traits
* argTraitsFromType(const AvmCore
* core
, int32_t n
)
206 case kOBJECT
: return OBJECT_TYPE
;
207 case kCLASS
: return CLASS_TYPE
;
208 case kFUNCTION
: return FUNCTION_TYPE
;
209 case kARRAY
: return ARRAY_TYPE
;
210 case kSTRING
: return STRING_TYPE
;
211 case kNUMBER
: return NUMBER_TYPE
;
212 case kINT
: return INT_TYPE
;
213 case kUINT
: return UINT_TYPE
;
214 case kBOOLEAN
: return BOOLEAN_TYPE
;
215 case kVOID
: return VOID_TYPE
;
216 case kANY
: return NULL
;
217 case kNAMESPACE
: return NAMESPACE_TYPE
;
218 case kVECTORINT
: return VECTORINT_TYPE
;
219 case kVECTORUINT
: return VECTORUINT_TYPE
;
220 case kVECTORDOUBLE
: return VECTORDOUBLE_TYPE
;
221 case kVECTOROBJ
: return VECTOROBJ_TYPE
;
223 AvmAssert(false); // shouldn't happen...
227 // iterates over callee types for a method's signature
228 class MethodSigArgDescIter
232 const MethodInfo
* m_methInfo
;
233 const MethodSignature
* m_methSig
;
236 MethodSigArgDescIter(MethodInfo
* methInfo
) : m_n(0), m_methInfo(methInfo
), m_methSig(methInfo
->getMethodSignature())
242 if (m_n
<= m_methSig
->param_count())
243 return m_methSig
->paramTraits(m_n
++);
244 AvmCore
* core
= m_methInfo
->pool()->core
;
250 return m_methInfo
->needRest() ? true : false;
255 return m_methInfo
->needArguments() ? true : false;
258 bool needOptionalArgs()
269 typedef struct _APType
* APType
; // type doesn't matter.. just don't clash w/ va_list
271 class APArgDescIter
: public MethodSigArgDescIter
277 APArgDescIter(int argc
, MethodInfo
* mi
) : MethodSigArgDescIter(mi
), m_argc(argc
)
280 if (argc
>= 0 && !m_methSig
->argcOk(argc
))
282 AvmCore
* core
= m_methInfo
->pool()->core
;
284 core
->console
<< "argc bad: " << m_methInfo
->format(core
) << " : " << argc
<< "\n";
287 AvmAssert(argc
< 0 || m_methSig
->argcOk(argc
));
292 AvmCore
* core
= m_methInfo
->pool()->core
;
294 core
->console
<< "APArgDescIter::nextType() m_n: " << m_n
<< "\n";
296 if (m_argc
< 0 || m_n
<= m_argc
)
298 if (m_n
<= m_methSig
->param_count())
299 return MethodSigArgDescIter::nextType();
319 bool needOptionalArgs()
324 bool isVarArg() // can just keep pushing atoms
330 class AtomvArgDescIter
336 public: // TODO is this counting right?
337 AtomvArgDescIter(AvmCore
* core
, int argc
= -2) : m_core(core
), m_argc(argc
) {}
342 return NULL
; // all atoms all the time!
343 else if (m_argc
>= 0)
348 AvmCore
* core
= m_core
;
362 bool needOptionalArgs()
373 // read 4 bit types out of a uintptr_t bitwise
374 // 32 bits holds 7 argument descriptions (plus ret type)
375 // 64 bits can hold 15 (plus ret type)
383 ImmArgDescIter(uintptr_t argDesc
, AvmCore
* core
) : m_argDesc(argDesc
), m_core(core
)
387 // iterates over the types in the argument descriptor
388 // VOID signals end of arguments; it is invalid to call nextType again after
392 return argTraitsFromType(m_core
, nextTypeKind());
395 unsigned nextTypeKind()
397 unsigned type
= (unsigned)(m_argDesc
>> (sizeof(m_argDesc
) * 8 - 4));
412 bool needOptionalArgs()
423 // read types out of an unsigned char*
427 // pointer to next byte of arg description
429 // currently loaded bits of arg description
430 unsigned char m_bitBuf
;
431 // number of valid bits in m_bitBuf
432 unsigned char m_bits
;
436 PtrArgDescIter(void* argDesc
, AvmCore
* core
) : m_p((unsigned char* )argDesc
), m_bitBuf(0), m_bits(0), m_core(core
)
440 // iterates over the types in the argument descriptor
441 // VOID signals end of arguments; it is invalid to call nextType again after
445 return argTraitsFromType(m_core
, nextTypeKind());
448 unsigned nextTypeKind()
456 unsigned type
= m_bitBuf
>> 4;
473 bool needOptionalArgs()
484 // lay out an argument description
485 // on x86-32, an argument description is exactly a stack layout
486 // on ARM, the first 4 words are r0-r3 and the rest is exactly a stack layout
487 // on x86-64, it will no doubt be more complicated...
497 // NULL dst is legal for measuring a call layout
498 ArgDescLayout(void* dst
) : m_dst(dst
)
500 , m_minDst((void*)((uintptr_t)dst
+ 16)) // must make room for a1-a4 always
504 AvmAssert(!(7 & (uintptr_t)dst
));
506 AvmAssert(!(15 & (uintptr_t)dst
));
513 return (m_dst
> m_minDst
) ? m_dst
: m_minDst
;
519 // TODO really just platform int? that's what AtomMethodProc style uses
522 int32_t* result
= (int32_t*)m_dst
;
523 m_dst
= (void*)(4 + (uintptr_t)m_dst
);
529 // TODO: doubles on ARM may need to be aligned, but because we have only
530 // two arg sizes (4 and 8 bytes) it won't affect us right now
531 double* result
= (double* )m_dst
;
532 m_dst
= (void*)(8 + (uintptr_t)m_dst
);
538 void** result
= (void**)m_dst
;
539 m_dst
= (void*)(4 + (uintptr_t)m_dst
);
544 // calculate the size required for the given set of argument types for a cdecl call
545 // including MethodEnv
546 template <class ARG_TYPE_ITER
> static int32_t argDescSize(ARG_TYPE_ITER calleeTypeIter
, AvmCore
*)
548 ArgDescLayout
l(NULL
);
552 Traits
* t
= calleeTypeIter
.nextType();
554 BuiltinType bt
= Traits::getBuiltinType(t
);
556 if (bt
== BUILTIN_void
) break;
558 switch(Traits::getBuiltinType(t
))
562 case BUILTIN_boolean
:
573 if (calleeTypeIter
.needArguments() || calleeTypeIter
.hasRest())
576 l
.int32Arg(); // argc
579 l
.ptrArg(); // MethodEnv
580 return (int32_t)(uintptr_t)l
.argEnd();
583 // calculate the size required for the given set of argument types passed "ap" style
584 // just figures out the size of the ap struct... doesn't include MethodEnv
585 template <class ARG_TYPE_ITER1
, class ARG_TYPE_ITER2
> static int32_t apArgDescSize(ARG_TYPE_ITER1 callerTypeIter
, ARG_TYPE_ITER2 calleeTypeIter
, AvmCore
* core
)
589 // count "regular" arguments... stored as unaligned native
592 Traits
* t
= calleeTypeIter
.nextType();
594 BuiltinType bt
= Traits::getBuiltinType(t
);
596 // no more "regular" arguments? break out
597 if (bt
== BUILTIN_void
) break;
599 // no more caller arguments? done
600 if (callerTypeIter
.nextType() == VOID_TYPE
)
603 switch(Traits::getBuiltinType(t
))
607 case BUILTIN_boolean
:
608 size
+= sizeof(int32_t);
611 size
+= sizeof(double);
614 size
+= sizeof(void*);
618 // count the rest as pointers (Atoms)
619 while (callerTypeIter
.nextType() != VOID_TYPE
)
620 size
+= sizeof(void*);
624 #if defined(AVMPLUS_IA32)
625 // pseudo-cross-compiler macros for inline assembly...
626 // gcc/x86 in particular isn't really production grade
628 // msvc stuff works well inline...
629 #define ASM_FUNC_BEGIN(R, N, A) static R __declspec(naked) N A {
630 #define ASM_FUNC_END(N) }
631 #define ASM1(X) __asm { X }
632 #define ASM2(X,Y) __asm { X,Y }
633 #define ASM_CALL(X) __asm { call X }
634 #define ASM_REDIR(F) __asm { jmp F }
636 // gcc doesn't support naked inline functions despite the clear use for them
637 // this stuff is hackery to help development -- gcc targets should have .s files for production
638 #define ASM_FUNC_BEGIN(R, N, A) typedef R (* N##_type)A; static void* N##_container () { \
640 __asm__ (" mov $L"#N"_astart, %[result]" : [result] "=r" (result)); \
642 __asm__ (" .intel_syntax noprefix "); \
643 __asm__ ("L"#N"_astart: ");
644 #define ASM_FUNC_END(N) __asm__ (" .att_syntax noprefix "); ret: if (result == 0) goto asmlbl; return result; } N##_type N = (N##_type)N##_container();
645 #define ASM1(X) __asm__ ( #X );
646 #define ASM2(X,Y) __asm__ ( #X","#Y );
647 #define ASM_CALL(X) __asm__ ("call _"#X"\n");
648 #define ASM_REDIR(F) __asm__ ( "push [_"#F"]\n ret");
650 #elif defined(AVMPLUS_ARM)
651 // gcc doesn't support naked inline functions despite the clear use for them
652 // this stuff is hackery to help development -- gcc targets should have .s files for production
653 #define ASM_BP __asm__("stmdb sp!, {a1, a2, a3, a4}\n swi 0x80\n ldmia sp!, {a1, a2, a3, a4}")
654 #define ASM_FUNC_BEGIN(R, N, A) \
656 __asm__(".section __TEXT,__text,regular,pure_instructions"); \
657 __asm__(".align 2"); \
658 __asm__(".globl _"#N" "); \
660 #define ASM_FUNC_END(N)
661 #define ASM_REDIR(F) __asm__ ( "b _"#F" ");
665 // prototype for an argument coercer
666 // env is MethodEnv coercing for
667 // callerArgDesc is an opaque description of the arguments to be found via callerArgDesc
668 // callerAp is the variadic args passed to coerce32CdeclImmArgDesc, et al
669 // calleeArgDescBuf is the argument buffer into which coerced arguments are written
670 // returns a pointer to a function used for return value conversion or NULL if none is needed
// Function-pointer type for an argument coercer (see the comment block above):
// given the callee, its MethodEnv, the desired return Traits, an opaque
// caller-side argument description plus the caller's raw argument data, it
// fills calleeArgDescBuf with coerced arguments and returns a pointer to a
// return-value conversion function, or NULL when no return conversion is
// needed.
671 typedef void* (*ArgCoercer
)(void* callee
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
, void* calleeArgDescBuf
);
675 // core of the thunking mechanism...
676 // doesn't necessarily return an int32_t... the N/double variant is identical
677 // -- callee is the cdecl method to invoke
678 // -- calleeArgDescBufSize is the amount of space to allocate for the coerced args (and possibly register alloc info for, say, x86/64)
679 // -- argCoercer is a callback that does the actual filling of the coerced argument buf
680 // -- env is the MethodEnv we're calling (used to get argument traits -- does not make callee redundant as env may have >1 impl)
681 // -- calleeArgDesc is an opaque description of the types of variadic arguments in callerAp
682 // -- callerAp represents the arguments to coerce
683 ASM_FUNC_BEGIN(Atom
, coerce32CdeclShim
,
684 (void* callee
, unsigned calleeArgDescBufSize
, ArgCoercer argCoercer
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
))
686 __asm__("stmdb sp!, {v1, v2, v3, v7, lr}");
687 __asm__("mov v7, sp");
689 // a2 = calleeArgDescBufSize
693 // [v7, #4] = callerArgDesc
694 // [v7, #8] = callerAp
695 __asm__("sub sp, sp, a2"); // make room for args
696 __asm__("and sp, sp, #-8"); // double word align
697 __asm__("mov v2, a3"); // save off argCoercer
698 __asm__("mov v3, a4"); // save off env
701 __asm__("mov a2, a4"); // pass env
702 __asm__("ldr a3, [v7, #20]"); // pass retTraits
703 __asm__("ldr a4, [v7, #24]"); // pass callerArgDesc
704 __asm__("mov v1, sp"); // pass calleeArgDescBuf
705 __asm__("stmdb sp!, {v1}");
706 __asm__("ldr v1, [v7, #28]"); // pass callerAp
707 __asm__("stmdb sp!, {v1}");
708 __asm__("mov v1, a1"); // save off callee
709 __asm__("bl _via_v2"); // call coercer!
710 __asm__("add sp, sp, #8"); // restore stack
711 __asm__("mov v2, a1"); // remember returnCoercer
712 __asm__("ldmia sp!, {a1, a2, a3, a4}"); // move first 4 arg words into registers
713 __asm__("bl _via_v1"); // call the implementation!
714 __asm__("mov sp, v7"); // restore stack
715 __asm__("cmp v2, #0"); // maybe call returnCoercer -- a1 and a2 will be the double or a1 will be the 32 and a2 will be a dummy
716 __asm__("ldrne a3, [v7, #20]"); // retTraits
717 __asm__("movne a4, v3"); // env
718 __asm__("blne _via_v2");
719 __asm__("ldmia sp!, {v1, v2, v3, v7, pc}"); // done!
720 __asm__("_via_v1: bx v1");
721 __asm__("_via_v2: bx v2");
725 (void)calleeArgDescBufSize
;
736 // [ebp+4] = return address
738 // [ebp+12] = calleeArgDescBufSize
739 // [ebp+16] = argCoercer
741 // [ebp+24] = retTraits
742 // [ebp+28] = callerArgDesc
743 // [ebp+32] = callerAp
744 ASM2( sub esp
, [ebp
+12] ) // make room for args
745 ASM2( and esp
, 0xfffffff0 ) // 16 byte aligned
747 ASM2( sub esp
, 8 ) // 16 byte aligned for call
748 ASM1( push eax
) // calleeArgDescBuf
749 ASM1( push
[ebp
+32] ) // callerAp
750 ASM1( push
[ebp
+28] ) // callerArgDesc
751 ASM1( push
[ebp
+24] ) // retTraits
752 ASM1( push
[ebp
+20] ) // env
753 ASM1( push
[ebp
+8] ) // callee
754 ASM1( call
[ebp
+16] ) // map args: argCoercer(callee, env, retTraits, callerArgDesc, callerAp, callerArgDescBuf);
756 ASM2( mov esi
, eax
) // save result mapping func
757 ASM1( call
[ebp
+8] ) // call method
758 ASM2( lea esp
, [ebp
-4]) // restore stack
760 ASM1( je coerce32CdeclShim_done
)
761 ASM2( sub esp
, 8 ) // 16 byte aligned for call
762 ASM1( push
[ebp
+20] ) // env
763 ASM1( push
[ebp
+24] ) // retTraits
764 ASM1( call esi
) // map return value: retCoercer(retTraits, env)
766 ASM1(coerce32CdeclShim_done
:)
771 ASM_FUNC_END(coerce32CdeclShim
)
773 ASM_FUNC_BEGIN(double, coerceNCdeclShim
, (void* callee
,
774 unsigned calleeArgDescBufSize
, ArgCoercer argCoercer
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
))
777 (void)calleeArgDescBufSize
;
784 ASM_REDIR(coerce32CdeclShim
) // exact same impl
785 ASM_FUNC_END(coerceNCdeclShim
)
787 // Number => something
788 Atom
returnCoercerNImpl(double n
, Traits
* retTraits
, MethodEnv
* env
)
793 return v
.get(env
->toplevel(), retTraits
);
797 ASM_FUNC_BEGIN(Atom
, returnCoercerN
, (Traits
* retTraits
, MethodEnv
* env
))
799 __asm__("b _returnCoercerNImpl"); // straight through
801 ASM1( push ebp
) // this is necessary to keep pthreads happy!
804 ASM1( push
[ebp
+12]) // env
805 ASM1( push
[ebp
+8]) // retTraits
807 ASM1( fstp qword ptr
[esp
]) // callee will have left a value on the FP stack
808 ASM_CALL(returnCoercerNImpl
)
813 ASM_FUNC_END(returnCoercerN
)
816 ASM_FUNC_BEGIN(Atom
, returnCoercerNPop
, (Traits
* retTraits
, MethodEnv
* env
))
817 ASM1( push ebp
) // this is necessary to keep pthreads happy!
819 ASM1( fstp
st(0)) // callee will have left a value on the FP stack
822 ASM_FUNC_END(returnCoercerNPop
)
825 // something => Number
826 double returnCoercerN32Impl(Atom a
, Traits
* /*retTraits*/, MethodEnv
* env
)
828 Traits
* calleeRT
= env
->method
->getMethodSignature()->returnTraits();
833 return v
.getDouble();
837 ASM_FUNC_BEGIN(double, returnCoercerN32
, (Traits
* retTraits
, MethodEnv
* env
))
839 __asm__("mov a2, a3"); // a2 is a dummy
840 __asm__("mov a3, a4");
841 __asm__("b _returnCoercerN32Impl");
843 ASM1( push ebp
) // this is necessary to keep pthreads happy!
845 ASM1( push
[ebp
+12]) // env
846 ASM1( push
[ebp
+8]) // retTraits
848 ASM_CALL(returnCoercerN32Impl
) // will push something on the FP stack
853 ASM_FUNC_END(returnCoercerN32
)
855 // something => something
856 Atom
returnCoercer32Impl(Atom a
, Traits
* retTraits
, MethodEnv
* env
)
858 Traits
* calleeRT
= env
->method
->getMethodSignature()->returnTraits();
862 return v
.get(env
->toplevel(), retTraits
);
865 // some 32 => some 32
866 ASM_FUNC_BEGIN(Atom
, returnCoercer32
, (Traits
* retTraits
, MethodEnv
* env
))
868 __asm__("mov a2, a3"); // a2 is a dummy
869 __asm__("mov a3, a4");
870 __asm__("b _returnCoercer32Impl");
872 ASM1( push ebp
) // this is necessary to keep pthreads happy!
874 ASM1( push
[ebp
+12]) // env
875 ASM1( push
[ebp
+8]) // retTraits
877 ASM_CALL(returnCoercer32Impl
)
882 ASM_FUNC_END(returnCoercer32
)
886 // returns any function required to coerce the callee's return type to the
887 // caller's desired return type
// Selects the return-value conversion shim needed to coerce the callee's
// return type (calleeRT) to what the caller expects (callerRT):
//   - identical types: no conversion (the early-return line is missing here,
//     but the "no conversion" comments establish the intent);
//   - caller discards the result (VOID): only a Number return needs handling
//     (returnCoercerNPop pops the x87 FP stack);
//   - both int/uint: bit-compatible, no conversion;
//   - Number -> 32-bit: returnCoercerN; 32-bit -> Number: returnCoercerN32;
//   - otherwise generic 32-bit -> 32-bit: returnCoercer32.
888 static void* returnCoercer(AvmCore
* core
, Traits
* calleeRT
, Traits
* callerRT
)
890 // same or caller will discard? no conversion
891 if (callerRT
== calleeRT
)
893 if (callerRT
== VOID_TYPE
)
896 if (calleeRT
== NUMBER_TYPE
)
897 return (void*)returnCoercerNPop
;
902 // both integral types? no conversion
903 if ((callerRT
== INT_TYPE
|| callerRT
== UINT_TYPE
) &&
904 (calleeRT
== INT_TYPE
|| calleeRT
== UINT_TYPE
))
906 // is callee a double returner?
907 if (calleeRT
== NUMBER_TYPE
)
908 return (void*)returnCoercerN
; // Number => 32
910 if (callerRT
== NUMBER_TYPE
)
911 return (void*)returnCoercerN32
; // 32 => Number
913 return (void*)returnCoercer32
; // 32 => 32
// Reads one 32-bit argument from a va_list-style caller argument stream,
// advancing the list. The original "x86-64?" note flags that a plain
// 32-bit read may be wrong on LP64 ABIs — TODO confirm per-platform.
916 static int32_t arg32(va_list& ap
)
918 return va_arg(ap
, int32_t); // x86-64?
// Reads one 32-bit argument from an "ap"-style packed argument block and
// advances the cursor by sizeof(int32_t). APType is the opaque pointer type
// declared earlier for this calling style. The "x86-64?" note again flags a
// possible width assumption on 64-bit ABIs — TODO confirm.
921 static int32_t arg32(APType
& ap
)
923 int32_t result
= *(int32_t *)ap
;
924 ap
= (APType
)((uintptr_t)ap
+ sizeof(int32_t)); // x86-64?
// Reads one double (Number) argument from a va_list, advancing the list.
928 static double argN(va_list& ap
)
930 return va_arg(ap
, double);
// Reads one double (Number) argument from an "ap"-style packed argument
// block; note the read is unaligned-tolerant by construction (ap advances in
// mixed 4/8-byte steps) — grounded in the sizeof-based cursor arithmetic.
933 static double argN(APType
& ap
)
935 double result
= *(double*)ap
;
936 ap
= (APType
)((uintptr_t)ap
+ sizeof(double));
// Consumes one caller argument of static type callerT from the argument
// stream ap and boxes it as an untyped Atom ("any"): Numbers are read as
// doubles via argN, everything else as a 32-bit value via arg32, loaded into
// an AvmValue v, then extracted with get(toplevel, NULL) where NULL requests
// the any/"*" representation. NOTE(review): the declaration of the local
// AvmValue v and the else keyword are among the lines missing from this
// extraction.
940 template <class ARG_ITER
> static Atom
coerceArgToAny(Toplevel
* toplevel
, ARG_ITER
& ap
, Traits
* callerT
)
942 AvmCore
* core
= toplevel
->core();
945 if (callerT
== NUMBER_TYPE
)
946 v
.setDouble(argN(ap
));
948 v
.set((Atom
)arg32(ap
), callerT
);
949 return v
.get(toplevel
, NULL
);
952 // coerces a single argument and writes it to an argument desc layout
953 template <class ARG_ITER
> static void coerceArg(Toplevel
* toplevel
, ArgDescLayout
& l
, Traits
* calleeT
, ARG_ITER
& callerAp
, Traits
* callerT
)
955 AvmCore
* core
= toplevel
->core();
957 if (calleeT
== callerT
&& calleeT
!= OBJECT_TYPE
) // OBJECT_TYPE might be a naked ScriptObject... let AvmValue handle it
959 if (calleeT
== NUMBER_TYPE
)
960 *l
.doubleArg() = argN(callerAp
);
962 *l
.int32Arg() = arg32(callerAp
);
964 else if (calleeT
== NUMBER_TYPE
)
967 v
.set((Atom
)arg32(callerAp
), callerT
);
968 *l
.doubleArg() = v
.getDouble();
970 else if (callerT
== NUMBER_TYPE
)
973 v
.setDouble(argN(callerAp
));
974 *l
.int32Arg() = v
.get(toplevel
, calleeT
);
979 v
.set((Atom
)arg32(callerAp
), callerT
);
980 *l
.int32Arg() = v
.get(toplevel
, calleeT
);
// Coerces a single already-boxed Atom into the cdecl argument layout l:
// treats the Atom's storage as a one-element "ap" stream and delegates to
// coerceArg with callerT == NULL (i.e. the caller-side type is any/"*").
984 static void coerceArgAtom(Toplevel
* toplevel
, ArgDescLayout
& l
, Traits
* calleeT
, Atom a
)
986 APType ap
= (APType
)&a
;
987 coerceArg(toplevel
, l
, calleeT
, ap
, NULL
);
990 // coerces a single argument and writes it into an AtomList
991 static void coerceArg(Toplevel
* toplevel
, AtomList
&atoms
, Traits
* calleeT
, va_list& callerAp
, Traits
* callerT
)
993 AvmCore
* core
= toplevel
->core();
995 if (callerT
== NUMBER_TYPE
)
998 v
.setDouble(argN(callerAp
));
999 atoms
.add((Atom
)v
.get(toplevel
, calleeT
));
1004 v
.set((Atom
)arg32(callerAp
), callerT
);
1005 atoms
.add((Atom
)v
.get(toplevel
, calleeT
));
1009 static void coerceArgAtom(Toplevel
*, AtomList
&atoms
, Traits
* /*calleeT*/, Atom a
)
1014 // coerces a single argument and writes it to an "ap" style arg list
1015 static void coerceArg(Toplevel
* toplevel
, APType
& ap
, Traits
* calleeT
, va_list& callerAp
, Traits
* callerT
)
1017 AvmCore
* core
= toplevel
->core();
1019 if (calleeT
== callerT
&& calleeT
!= OBJECT_TYPE
) // OBJECT_TYPE might be a naked ScriptObject... let AvmValue handle it
1021 if (calleeT
== NUMBER_TYPE
)
1023 *(double* )ap
= argN(callerAp
);
1024 ap
= (APType
)(sizeof(double) + (uintptr_t)ap
);
1028 *(int32_t*)ap
= arg32(callerAp
);
1029 ap
= (APType
)(sizeof(int32_t) + (uintptr_t)ap
);
1032 else if (calleeT
== NUMBER_TYPE
)
1035 v
.set((Atom
)arg32(callerAp
), callerT
);
1036 *(double* )ap
= v
.getDouble();
1037 ap
= (APType
)(sizeof(double) + (uintptr_t)ap
);
1039 else if (callerT
== NUMBER_TYPE
)
1042 v
.setDouble(argN(callerAp
));
1043 *(int32_t*)ap
= v
.get(toplevel
, calleeT
);
1044 ap
= (APType
)(sizeof(int32_t) + (uintptr_t)ap
);
1049 v
.set((Atom
)arg32(callerAp
), callerT
);
1050 *(int32_t*)ap
= v
.get(toplevel
, calleeT
);
1051 ap
= (APType
)(sizeof(int32_t) + (uintptr_t)ap
);
1055 static void coerceArgAtomI(Toplevel
* toplevel
, APType
& ap
, Traits
* calleeT
, ...)
1059 va_start(va
, calleeT
);
1060 coerceArg(toplevel
, ap
, calleeT
, va
, NULL
);
1064 static void coerceArgAtom(Toplevel
* toplevel
, APType
& ap
, Traits
* calleeT
, Atom a
)
1066 coerceArgAtomI(toplevel
, ap
, calleeT
, a
);
// Appends the trailing rest/arguments Array to a cdecl argument layout:
// writes the dense Atom vector pointer, then the element count, then the
// ArrayObject itself, in that order. The TODO markers are the original
// author's — the exact argv/argc passing convention was still under review.
1069 static void handleRest(Toplevel
*, ArgDescLayout
& l
, ArrayObject
*rest
)
1071 uint32_t argc
= rest
->getDenseLength();
1072 Atom
*argv
= rest
->getDenseCopy();
1074 *l
.ptrArg() = argv
; // TODO argv
1075 *l
.int32Arg() = argc
; // TODO argc
1076 *l
.ptrArg() = rest
; // rest
// "ap"-style argument blocks never carry a rest array in this calling
// convention, so reaching this overload indicates a logic error upstream.
1079 static void handleRest(Toplevel
*, APType
&, ArrayObject
*)
1081 AvmAssert(false); // AP doesn't handle rest in the CC
// AtomList overload of handleRest — intentionally a no-op as far as this
// extraction shows (no body lines visible between the signature and the next
// comment block); rest arguments for the AtomList writer are presumably
// handled elsewhere — TODO confirm against the full source.
1084 static void handleRest(Toplevel
*, AtomList
&, ArrayObject
*)
1089 // coerces a set of arguments and writes to a given argument description
1090 template <class ARG_TYPE_ITER1
, class ARG_ITER
, class ARG_TYPE_ITER2
, class ARG_WRITER
>
1091 static int32_t argCoerceLoop(MethodEnv
* env
, ARG_TYPE_ITER1 callerTypeIter
, ARG_ITER callerAp
,
1092 ARG_TYPE_ITER2 calleeTypeIter
, ARG_WRITER
&argDescWriter
)
1094 Toplevel
* toplevel
= env
->toplevel();
1095 MethodInfo
* info
= env
->method
;
1096 AvmCore
* core
= env
->core();
1100 ArrayObject
* argsOrRest
= NULL
;
1101 ArrayObject
* args
= NULL
; // "arguments"
1102 const MethodSignature
* ms
= env
->method
->getMethodSignature();
1104 // "arguments" captures all arguments
1105 if (calleeTypeIter
.needArguments())
1107 ARG_TYPE_ITER1 callerTypeIterTemp
= callerTypeIter
;
1108 ARG_ITER callerApTemp
= callerAp
;
1109 Traits
* callerT
= callerTypeIterTemp
.nextType();
1111 AvmAssert(callerT
!= VOID_TYPE
);
1112 Atom a
= coerceArgToAny(toplevel
, callerApTemp
, callerT
);
1113 args
= env
->createArguments(&a
, 0);
1118 Traits
* callerT
= callerTypeIter
.nextType();
1122 core
->console
<< " callerT: " << callerT
->formatClassName() << "\n";
1124 core
->console
<< " callerT: *\n";
1126 // no more params from caller? break out
1127 if (callerT
== VOID_TYPE
)
1130 Traits
* calleeT
= calleeTypeIter
.nextType();
1134 core
->console
<< " calleeT: " << calleeT
->formatClassName() << "\n";
1136 core
->console
<< " calleeT: *\n";
1139 // no more normal params for callee
1140 if (calleeT
== VOID_TYPE
)
1142 if (!ms
->allowExtraArgs())
1144 toplevel
->argumentErrorClass()->throwError(kWrongArgumentCountError
,
1145 core
->toErrorString(info
),
1146 core
->toErrorString(ms
->requiredParamCount()),
1147 core
->toErrorString(argc
));
1149 else // fill up rest/argument/var args
1151 // can we just keep pushing args? (i.e., "ap" style)
1152 if (calleeTypeIter
.isVarArg())
1155 core
->console
<< " argCoerceLoop: passing extra params as vararg\n";
1157 AvmAssert(!argsOrRest
); // shouldn't have rest or arguments if vararg
1160 // just keep writing "atom"s
1161 coerceArg(toplevel
, argDescWriter
, NULL
, callerAp
, callerT
);
1162 callerT
= callerTypeIter
.nextType();
1164 } while (callerT
!= VOID_TYPE
);
1168 // are we actually using the args?
1169 if (calleeTypeIter
.needArguments() || calleeTypeIter
.hasRest())
1172 core
->console
<< " argCoerceLoop: passing extra params as Array\n";
1175 argsOrRest
= args
? args
: toplevel
->arrayClass()->newArray(1);
1178 Atom a
= coerceArgToAny(toplevel
, callerAp
, callerT
);
1179 argsOrRest
->push(&a
, 1);
1180 callerT
= callerTypeIter
.nextType();
1181 } while (callerT
!= VOID_TYPE
);
1185 core
->console
<< " argCoerceLoop: discarding extra params\n";
1192 // copy arg into "arguments"
1195 ARG_ITER ap
= callerAp
;
1196 Atom a
= coerceArgToAny(toplevel
, ap
, callerT
);
1197 argsOrRest
->push(&a
, 1);
1199 else if (args
) // arguments doesn't take "this" so
1201 coerceArg(toplevel
, argDescWriter
, calleeT
, callerAp
, callerT
);
1205 if (calleeTypeIter
.needOptionalArgs())
1207 // deal with "optional" args
1208 int32_t regArgs
= ms
->param_count() + 1;
1209 int optNum
= argc
- regArgs
+ ms
->optional_count();
1211 if (optNum
< 0) // not enough non-optional arguments passed...
1212 toplevel
->argumentErrorClass()->throwError(kWrongArgumentCountError
,
1213 core
->toErrorString(info
),
1214 core
->toErrorString(ms
->requiredParamCount()),
1215 core
->toErrorString(argc
));
1217 while (argc
< regArgs
) // optional...
1219 Traits
* calleeT
= calleeTypeIter
.nextType();
1220 coerceArgAtom(toplevel
, argDescWriter
, calleeT
, ms
->getDefaultValue(optNum
++));
1224 // pass ArrayObject through
1225 if (calleeTypeIter
.needArguments() || calleeTypeIter
.hasRest())
1228 argsOrRest
= args
? args
: toplevel
->arrayClass()->newArray(0);
1229 handleRest(toplevel
, argDescWriter
, argsOrRest
);
1234 static void passBaseArgs(MethodEnv
* env
, va_list& callerAp
, ArgDescLayout
& l
)
1241 static void passBaseArgs(MethodEnv
* env
, APType
& callerAp
, ArgDescLayout
& l
)
1248 static void passTailArgs(MethodEnv
* env
, va_list& callerAp
, ArgDescLayout
& l
)
1253 // pass MethodEnv at the end
1257 static void passTailArgs(MethodEnv
* env
, APType
& callerAp
, ArgDescLayout
& l
)
1262 // pass MethodEnv at the end
1266 // coerces a set of variadic arguments to a cdecl arg description
1267 template <class ARG_TYPE_ITER
, class AP_TYPE
> static void* argCoercer(void* /*callee*/, MethodEnv
* env
, Traits
* callerRT
, ARG_TYPE_ITER callerTypeIter
, AP_TYPE callerAp
, void* calleeArgDescBuf
)
1269 ArgDescLayout
l(calleeArgDescBuf
);
1271 passBaseArgs(env
, callerAp
, l
);
1273 MethodInfo
* info
= env
->method
;
1274 MethodSigArgDescIter
calleeTypeIter(info
);
1275 Traits
* calleeRT
= info
->getMethodSignature()->returnTraits();
1276 AvmCore
* core
= info
->pool()->core
;
1277 argCoerceLoop(env
, callerTypeIter
, callerAp
, calleeTypeIter
, l
);
1279 core
->console
<< "argCoercer: " << info
->format(core
) << " ";
1281 core
->console
<< callerRT
->format(core
);
1283 core
->console
<< "*";
1284 core
->console
<< " -> ";
1286 core
->console
<< calleeRT
->format(core
);
1288 core
->console
<< "*";
1289 core
->console
<< "\n";
1292 passTailArgs(env
, callerAp
, l
);
1293 // return any return type Coercer
1294 return returnCoercer(core
, calleeRT
, callerRT
);
// ArgCoercer implementation for atom-vector callers: callerArgDesc carries
// the atom count, callerAp points at the Atom array (reinterpreted as an
// APType stream — every element is a uniform Atom). The AvmAssert(false) is
// the original author's TESTME guard: this path was never validated for a
// possible off-by-one in AtomvArgDescIter.
1297 // callerArgDesc is the number of atoms
1298 static void* atomvArgDescCoercer(void* callee
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
, void* calleeArgDescBuf
)
1300 AtomvArgDescIter
callerTypeIter(env
->core(), (int32_t)callerArgDesc
);
1302 AvmAssert(false); // TESTME -- AtomvArgDescIter off by one error or not
1303 return argCoercer(callee
, env
, retTraits
, callerTypeIter
, (APType
)callerAp
, calleeArgDescBuf
);
// ArgCoercer implementation for "ap"-style callers: callerArgDesc is the
// argument count; argument types come from the callee's own method signature
// (APArgDescIter wraps the MethodInfo), and callerAp is the packed native
// argument block.
1306 // callerArgDesc is the number of args
1307 static void* apArgDescCoercer(void* callee
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
, void* calleeArgDescBuf
)
1309 APArgDescIter
callerTypeIter((int32_t)callerArgDesc
, env
->method
);
1311 return argCoercer(callee
, env
, retTraits
, callerTypeIter
, (APType
)callerAp
, calleeArgDescBuf
)
;
// ArgCoercer implementation for pointer-described callers: callerArgDesc
// points at a nybble string of argument-type kinds (read by PtrArgDescIter),
// and callerAp is a va_list (passed by address, hence the *(va_list*) deref).
1314 // callerArgDesc is a pointer to a string of nybbles describing arg types
1315 static void* ptrArgDescCoercer(void* callee
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
, void* calleeArgDescBuf
)
1317 PtrArgDescIter
callerTypeIter((void*)callerArgDesc
, env
->core());
1319 return argCoercer(callee
, env
, retTraits
, callerTypeIter
, *(va_list*)callerAp
, calleeArgDescBuf
);
// ArgCoercer implementation for immediate-described callers: the nybble
// type descriptors are packed directly into the callerArgDesc word (read by
// ImmArgDescIter — 7 args on 32-bit, 15 on 64-bit per the earlier comment),
// and callerAp is a va_list passed by address.
1322 // callerArgDesc is a value containing nybbles describing arg types
1323 static void* immArgDescCoercer(void* callee
, MethodEnv
* env
, Traits
* retTraits
, uintptr_t callerArgDesc
, void* callerAp
, void* calleeArgDescBuf
)
1325 ImmArgDescIter
callerTypeIter(callerArgDesc
, env
->core());
1327 return argCoercer(callee
, env
, retTraits
, callerTypeIter
, *(va_list*)callerAp
, calleeArgDescBuf
);
1330 // amount of stack space needed to call the given method cdecl style
// Convenience overload: computes the stack space needed to call the given
// method cdecl-style by iterating its own signature (MethodSigArgDescIter)
// and delegating to the template argDescSize above.
1331 static int32_t argDescSize(MethodInfo
* info
)
1333 AvmCore
* core
= info
->pool()->core
;
1334 MethodSigArgDescIter
calleeTypeIter(info
)
;
1336 return argDescSize(calleeTypeIter
, core
);
1339 // calls "env" with supplied variadic arguments described by the "immediate" flavor of argument
1340 // description in argDesc
1341 // returns an int32_t value
1342 Atom
coerce32CdeclArgDescEnter(Traits
* retTraits
, uintptr_t argDesc
, MethodEnv
* env
, va_list ap
)
1344 MethodInfo
* info
= env
->method
;
1345 Atom result
= coerce32CdeclShim(
1346 (void*)info
->handler_function(), argDescSize(info
),
1347 immArgDescCoercer
, env
, retTraits
, argDesc
, &ap
);
1351 // calls "env" with supplied variadic arguments described by the "pointer" flavor of argument
1352 // description in argDesc
1353 // returns an int32_t value
1354 Atom
coerce32CdeclArgDescEnter(Traits
* retTraits
, char* argDesc
, MethodEnv
* env
, va_list ap
)
1356 MethodInfo
* info
= env
->method
;
1357 Atom result
= coerce32CdeclShim(
1358 (void*)info
->handler_function(), argDescSize(info
), ptrArgDescCoercer
, env
, retTraits
, (uintptr_t)argDesc
, &ap
);
1362 Atom
coerce32CdeclArgDescEnter(Traits
* retTraits
, MethodEnv
* env
, int argc
, Atom
* argv
)
1364 MethodInfo
* info
= env
->method
;
1365 Atom result
= coerce32CdeclShim(
1366 (void*)info
->handler_function(), argDescSize(info
), atomvArgDescCoercer
, env
, retTraits
, (uintptr_t)argc
, (void*)argv
);
1370 Atom
coerce32CdeclArgDescEnter(Traits
* retTraits
, MethodEnv
* env
, int argc
, uint32_t* ap
)
1372 MethodInfo
* info
= env
->method
;
1373 Atom result
= coerce32CdeclShim(
1374 (void*)info
->handler_function(), argDescSize(info
), apArgDescCoercer
, env
, retTraits
, (uintptr_t)argc
, (void*)ap
);
1378 // calls "env" with supplied variadic arguments described by the "immediate" flavor of argument
1379 // description in argDesc
1380 // returns a double value
1381 double coerceNCdeclArgDescEnter(uintptr_t argDesc
, MethodEnv
* env
, va_list ap
)
1383 MethodInfo
* info
= env
->method
;
1384 AvmCore
* core
= env
->core();
1386 double result
= coerceNCdeclShim(
1387 (void*)info
->handler_function(), argDescSize(info
),
1388 immArgDescCoercer
, env
, NUMBER_TYPE
, argDesc
, &ap
);
1392 // calls "env" with supplied variadic arguments described by the "pointer" flavor of argument
1393 // description in argDesc
1394 // returns a double value
1395 double coerceNCdeclArgDescEnter(char* argDesc
, MethodEnv
* env
, va_list ap
)
1397 MethodInfo
* info
= env
->method
;
1398 AvmCore
* core
= env
->core();
1400 double result
= coerceNCdeclShim(
1401 (void*)info
->handler_function(), argDescSize(info
), ptrArgDescCoercer
, env
, NUMBER_TYPE
, (uintptr_t)argDesc
, &ap
);
1405 double coerceNCdeclArgDescEnter(MethodEnv
* env
, int argc
, Atom
* argv
)
1407 MethodInfo
* info
= env
->method
;
1408 AvmCore
* core
= env
->core();
1410 double result
= coerceNCdeclShim(
1411 (void*)info
->handler_function(), argDescSize(info
), atomvArgDescCoercer
, env
, NUMBER_TYPE
, (uintptr_t)argc
, (void*)argv
);
1415 double coerceNCdeclArgDescEnter(MethodEnv
* env
, int argc
, uint32_t* ap
)
1417 MethodInfo
* info
= env
->method
;
1418 AvmCore
* core
= env
->core();
1419 double result
= coerceNCdeclShim(
1420 (void*)info
->handler_function(), argDescSize(info
), apArgDescCoercer
, env
, NUMBER_TYPE
, (uintptr_t)argc
, (void*)ap
);
1424 // calculate size needed for ap style argument block
1425 int32_t argDescApSize(uintptr_t argDesc
, MethodEnv
* env
)
1427 APArgDescIter
calleeTypeIter(-1, env
->method
);
1428 ImmArgDescIter
callerTypeIter(argDesc
, env
->core());
1429 return apArgDescSize(callerTypeIter
, calleeTypeIter
, env
->core());
1432 int32_t argDescApSize(char* argDesc
, MethodEnv
* env
)
1434 APArgDescIter
calleeTypeIter(-1, env
->method
);
1435 PtrArgDescIter
callerTypeIter(argDesc
, env
->core());
1436 return apArgDescSize(callerTypeIter
, calleeTypeIter
, env
->core());
1439 // convert arguments to ap style argument block, returning "argc"
1440 int32_t argDescArgsToAp(void* calleeArgDescBuf
, uintptr_t argDesc
, MethodEnv
* env
, va_list ap
)
1442 APArgDescIter
calleeTypeIter(-1, env
->method
);
1443 ImmArgDescIter
callerTypeIter(argDesc
, env
->core());
1444 APType dst
= (APType
)calleeArgDescBuf
;
1445 return argCoerceLoop(env
, callerTypeIter
, ap
, calleeTypeIter
, dst
) - 1;
1448 int32_t argDescArgsToAp(void* calleeArgDescBuf
, char* argDesc
, MethodEnv
* env
, va_list ap
)
1450 APArgDescIter
calleeTypeIter(-1, env
->method
);
1451 PtrArgDescIter
callerTypeIter(argDesc
, env
->core());
1452 APType dst
= (APType
)calleeArgDescBuf
;
1453 return argCoerceLoop(env
, callerTypeIter
, ap
, calleeTypeIter
, dst
) - 1;
1456 // count arguments... no size calculations
1457 template <class ARG_TYPE_ITER
> static int32_t argDescArgCount(ARG_TYPE_ITER iter
)
1461 while (iter
.nextTypeKind() != kVOID
)
1466 // return number of arguments in description
1467 int32_t argDescArgCount(uintptr_t argDesc
)
1469 ImmArgDescIter
iter(argDesc
, NULL
);
1470 return argDescArgCount(iter
);
1473 int32_t argDescArgCount(char* argDesc
)
1475 PtrArgDescIter
iter(argDesc
, NULL
);
1476 return argDescArgCount(iter
);
1479 // convert arguments to Atoms
1480 void argDescArgsToAtomv(Atom
* args
, uintptr_t argDesc
, MethodEnv
* env
, va_list ap
)
1482 AvmCore
* core
= env
->core();
1483 ImmArgDescIter
callerTypeIter(argDesc
, core
);
1484 AtomvArgDescIter
calleeTypeIter(core
);
1485 APType dst
= (APType
)args
;
1486 argCoerceLoop(env
, callerTypeIter
, ap
, calleeTypeIter
, dst
);
1489 void argDescArgsToAtomv(Atom
* args
, char* argDesc
, MethodEnv
* env
, va_list ap
)
1491 AvmCore
* core
= env
->core();
1492 PtrArgDescIter
callerTypeIter(argDesc
, core
);
1493 AtomvArgDescIter
calleeTypeIter(core
);
1494 APType dst
= (APType
)args
;
1495 argCoerceLoop(env
, callerTypeIter
, ap
, calleeTypeIter
, dst
);
1498 // convert arguments to AtomList
1499 void argDescArgsToAtomList(AtomList
& dst
, uintptr_t argDesc
, MethodEnv
* env
, va_list ap
)
1501 AvmCore
* core
= env
->core();
1502 ImmArgDescIter
callerTypeIter(argDesc
, core
);
1503 AtomvArgDescIter
calleeTypeIter(core
);
1504 argCoerceLoop(env
, callerTypeIter
, ap
, calleeTypeIter
, dst
);
1507 void argDescArgsToAtomList(AtomList
& dst
, char* argDesc
, MethodEnv
* env
, va_list ap
)
1509 AvmCore
* core
= env
->core();
1510 PtrArgDescIter
callerTypeIter(argDesc
, core
);
1511 AtomvArgDescIter
calleeTypeIter(core
);
1512 argCoerceLoop(env
, callerTypeIter
, ap
, calleeTypeIter
, dst
);
1516 uintptr_t aotThunker(MethodEnv
* env
, int32_t argc
, uint32_t* argv
)
1518 Traits
* rt
= env
->method
->getMethodSignature()->returnTraits();
1519 return coerce32CdeclArgDescEnter(rt
, env
, argc
, (uint32_t* )argv
);
1522 double aotThunkerN(MethodEnv
* env
, int32_t argc
, uint32_t* argv
)
1524 return coerceNCdeclArgDescEnter(env
, argc
, (uint32_t* )argv
);
1529 #endif // VMCFG_CDECL