Merge remote-tracking branch 'redux/master' into sh4-pool
[tamarin-stm.git] / core / CdeclThunk.cpp
blob19f3fec095aefa53a5f71ae5236df54c1a5d97f5
1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
16 * The Original Code is [Open Source Virtual Machine.].
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2010
21 * the Initial Developer. All Rights Reserved.
23 * Contributor(s):
24 * Adobe AS3 Team
26 * Alternatively, the contents of this file may be used under the terms of
27 * either the GNU General Public License Version 2 or later (the "GPL"), or
28 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
38 * ***** END LICENSE BLOCK ***** */
40 #include "avmplus.h"
41 #include "MethodInfo.h"
43 #ifdef VMCFG_CDECL
45 #include "CdeclThunk.h"
47 namespace avmplus
49 // stores arbitrary Avm values without allocation
// Scratch holder for one AS3 value during thunk argument/return coercion.
// Keeps a tagged Atom plus an inline double slot so that number values can be
// boxed as a kDoubleType Atom pointing at _d instead of allocating on the GC heap.
// NOTE(review): this listing appears to have had brace-only lines stripped by the
// scraper; code below is kept byte-identical to the scrape.
50 class AvmValue
52 private:
53 enum { _tagBits = 3 };
// Inline storage backing dblAtom(); alignment requested so the tagged pointer's
// low bits are free for the kind tag.
54 double _d __attribute__((aligned(16)));
55 Atom _a;
// Address of the inline double slot.
57 double* dblPtr()
59 return &_d;
// Atom of kind kDoubleType that points at the inline slot (no heap allocation).
62 Atom dblAtom()
64 return kDoubleType | (Atom)dblPtr();
67 public:
// Atom kind tag of the currently stored value.
68 int kind()
70 return atomKind(_a);
// Read the stored value out, coerced to the representation callers of type t
// expect (raw int/uint/bool, Stringp, Namespace*, ScriptObject*, or Atom).
// t == NULL / BUILTIN_any yields the Atom itself.
73 Atom get(Toplevel* toplevel, Traits* t)
75 AvmCore* core = toplevel->core();
77 BuiltinType bt = Traits::getBuiltinType(t);
// If the value still lives in the inline double slot, box it properly first --
// the inline Atom must not escape this object's lifetime.
79 if ((bt == BUILTIN_any || bt == BUILTIN_object) && _a == dblAtom())
80 _a = core->doubleToAtom(*dblPtr());
82 // should not be in here if we want a void return type.
83 AvmAssert(bt != BUILTIN_void);
85 switch(bt)
87 case BUILTIN_any:
88 return _a;
89 case BUILTIN_int:
90 return (Atom)AvmCore::integer(_a);
91 case BUILTIN_uint:
92 return (Atom)AvmCore::toUInt32(_a);
93 case BUILTIN_boolean:
94 return (Atom)AvmCore::boolean(_a);
95 case BUILTIN_string:
96 return (Atom)core->coerce_s(_a);
97 case BUILTIN_namespace:
98 if (atomKind(_a) == kNamespaceType)
99 return (Atom)core->atomToNamespace(_a);
100 AvmAssert(AvmCore::isNullOrUndefined(_a));
101 return nullNsAtom;
102 case BUILTIN_object:
103 return AvmCore::isNullOrUndefined(_a) ? nullObjectAtom : _a;
104 default:
105 AvmAssert(t != NUMBER_TYPE); // use getDouble
106 return (Atom)AvmCore::atomToScriptObject(toplevel->coerce(_a, t));
// Numeric read; pair with set()/setDouble(). Use instead of get() for NUMBER_TYPE.
110 double getDouble()
112 return AvmCore::number(_a);
// Store a raw value whose representation is described by t (NULL t means a is
// already a tagged Atom). Mirrors the representations produced by get().
115 void set(Atom a, Traits* t)
117 if (!t) {
118 _a = a;
119 } else {
120 switch(Traits::getBuiltinType(t))
122 case BUILTIN_void:
123 AvmAssert(a == AtomConstants::undefinedAtom);
124 _a = AtomConstants::undefinedAtom;
125 break;
126 case BUILTIN_int:
127 setInt((intptr_t)a);
128 break;
129 case BUILTIN_uint:
130 setUint((uintptr_t)a);
131 break;
132 case BUILTIN_boolean:
133 _a = a ? trueAtom : falseAtom;
134 break;
135 case BUILTIN_string:
136 _a = a ? ((Stringp)a)->atom() : nullStringAtom;
137 break;
138 case BUILTIN_namespace:
139 _a = a ? ((Namespace*)a)->atom() : nullNsAtom;
140 break;
141 case BUILTIN_object:
// kUnusedAtomTag means a is a naked ScriptObject*, not an already-tagged Atom.
142 if (atomKind(a) == kUnusedAtomTag)
143 _a = a ? ((ScriptObject*)a)->atom() : nullObjectAtom;
144 else
145 _a = a;
146 break;
147 default:
148 AvmAssert(Traits::getBuiltinType(t) != BUILTIN_number);
149 _a = a ? ((ScriptObject*)a)->atom() : nullObjectAtom;
// Store an int: tag inline when it fits in an intptr after the tag shift,
// otherwise spill to the inline double slot.
154 void setInt(intptr_t i)
156 intptr_t iwt = i << _tagBits;
157 if ((iwt >> _tagBits) == i) {
158 _a = (iwt | kIntptrType);
159 } else {
160 *dblPtr() = i;
161 _a = dblAtom();
// Store a uint; values with any of the top (_tagBits+1) bits set spill to the
// double slot.
165 void setUint(uintptr_t u)
167 if (u & (~(static_cast<uintptr_t>(-1) >> (_tagBits + 1)))) {
168 *dblPtr() = u;
169 _a = dblAtom();
170 } else {
// NOTE(review): literal 3 here presumably should be _tagBits for consistency
// with setInt() -- same value today, but worth confirming/unifying.
171 _a = (u << 3) | kIntptrType;
// Store a double in the inline slot, represented as a pointer-tagged Atom.
175 void setDouble(double d)
177 *dblPtr() = d;
178 _a = dblAtom();
// Compact 4-bit type-kind codes used by the Imm/Ptr argument descriptors below.
// kVOID doubles as the end-of-arguments sentinel.
182 enum // we try to stick to 4 bits here instead of the 5 in BUILTIN_xxx
184 kVOID = 0,
185 kOBJECT,
186 kCLASS,
187 kFUNCTION,
188 kARRAY,
189 kSTRING,
190 kNUMBER,
191 kINT,
192 kUINT,
193 kBOOLEAN,
194 kANY,
195 kNAMESPACE,
196 kVECTORINT,
197 kVECTORUINT,
198 kVECTORDOUBLE,
199 kVECTOROBJ
// Map a 4-bit kind code to its Traits*. kANY maps to NULL ("*").
// NOTE(review): `core` looks unused, but the XXX_TYPE names are presumably
// macros that expand in terms of a local `core` -- confirm before removing it.
202 static Traits* argTraitsFromType(const AvmCore* core, int32_t n)
204 switch(n)
206 case kOBJECT: return OBJECT_TYPE;
207 case kCLASS: return CLASS_TYPE;
208 case kFUNCTION: return FUNCTION_TYPE;
209 case kARRAY: return ARRAY_TYPE;
210 case kSTRING: return STRING_TYPE;
211 case kNUMBER: return NUMBER_TYPE;
212 case kINT: return INT_TYPE;
213 case kUINT: return UINT_TYPE;
214 case kBOOLEAN: return BOOLEAN_TYPE;
215 case kVOID: return VOID_TYPE;
216 case kANY: return NULL;
217 case kNAMESPACE: return NAMESPACE_TYPE;
218 case kVECTORINT: return VECTORINT_TYPE;
219 case kVECTORUINT: return VECTORUINT_TYPE;
220 case kVECTORDOUBLE: return VECTORDOUBLE_TYPE;
221 case kVECTOROBJ: return VECTOROBJ_TYPE;
223 AvmAssert(false); // shouldn't happen...
224 return NULL;
227 // iterates over callee types for a method's signature
// Yields paramTraits(0..param_count) in order (index 0 is presumably the
// receiver -- confirm against MethodSignature), then VOID_TYPE as the
// end-of-arguments sentinel.
228 class MethodSigArgDescIter
230 protected:
// Index of the next parameter to yield.
231 int32_t m_n;
232 const MethodInfo* m_methInfo;
233 const MethodSignature* m_methSig;
235 public:
236 MethodSigArgDescIter(MethodInfo* methInfo) : m_n(0), m_methInfo(methInfo), m_methSig(methInfo->getMethodSignature())
// Next declared parameter type; VOID_TYPE when exhausted.
240 Traits* nextType()
242 if (m_n <= m_methSig->param_count())
243 return m_methSig->paramTraits(m_n++);
// NOTE(review): `core` appears unused but is presumably required by the
// VOID_TYPE macro expansion below.
244 AvmCore* core = m_methInfo->pool()->core;
245 return VOID_TYPE;
248 bool hasRest()
250 return m_methInfo->needRest() ? true : false;
253 bool needArguments()
255 return m_methInfo->needArguments() ? true : false;
// Signature iteration includes default-value filling for optionals.
258 bool needOptionalArgs()
260 return true;
// Fixed signature: extra args cannot simply be appended.
263 bool isVarArg()
265 return false;
// Opaque cursor type for "ap"-style packed argument buffers; distinct from
// va_list only so overload resolution can tell the two apart.
269 typedef struct _APType* APType; // type doesn't matter.. just don't clash w/ va_list
// Signature iterator bounded by an actual argc: yields declared param types,
// then NULL ("*"/Atom) for extra args up to argc, then VOID_TYPE.
// argc < 0 means "unknown count".
271 class APArgDescIter : public MethodSigArgDescIter
273 protected:
274 int32_t m_argc;
276 public:
277 APArgDescIter(int argc, MethodInfo* mi) : MethodSigArgDescIter(mi), m_argc(argc)
279 #if CDECL_VERBOSE
280 if (argc >= 0 && !m_methSig->argcOk(argc))
282 AvmCore* core = m_methInfo->pool()->core;
284 core->console << "argc bad: " << m_methInfo->format(core) << " : " << argc << "\n";
286 #endif
287 AvmAssert(argc < 0 || m_methSig->argcOk(argc));
290 Traits* nextType()
// `core` is needed by the console trace and presumably by the VOID_TYPE macro.
292 AvmCore* core = m_methInfo->pool()->core;
293 #if CDECL_VERBOSE
294 core->console << "APArgDescIter::nextType() m_n: " << m_n << "\n";
295 #endif
296 if (m_argc < 0 || m_n <= m_argc)
298 if (m_n <= m_methSig->param_count())
299 return MethodSigArgDescIter::nextType();
300 else
// Past the declared params but still within argc: extra args are untyped Atoms.
302 m_n++;
303 return NULL;
306 return VOID_TYPE;
309 bool hasRest()
311 return false;
314 bool needArguments()
316 return false;
319 bool needOptionalArgs()
321 return false;
324 bool isVarArg() // can just keep pushing atoms
326 return true;
// Describes an (Atom*, argc)-style caller: every argument is an untyped Atom
// (NULL). argc == -2 means unbounded; otherwise VOID_TYPE after argc args.
330 class AtomvArgDescIter
332 protected:
333 AvmCore* m_core;
334 int32_t m_argc;
336 public: // TODO is this counting right?
337 AtomvArgDescIter(AvmCore* core, int argc = -2) : m_core(core), m_argc(argc) {}
339 Traits* nextType()
341 if (m_argc == -2)
342 return NULL; // all atoms all the time!
343 else if (m_argc >= 0)
345 m_argc--;
346 return NULL;
// NOTE(review): `core` appears unused but is presumably required by the
// VOID_TYPE macro expansion below.
348 AvmCore* core = m_core;
349 return VOID_TYPE;
352 bool hasRest()
354 return false;
357 bool needArguments()
359 return false;
362 bool needOptionalArgs()
364 return false;
367 bool isVarArg()
369 return true;
373 // read 4 bit types out of a uintptr_t bitwise
374 // 32 bits holds 7 argument descriptions (plus ret type)
375 // 64 bits can hold 15 (plus ret type)
// Argument-type iterator backed by an immediate word: 4-bit kind codes packed
// most-significant-nibble first, terminated by kVOID (0).
376 class ImmArgDescIter
378 protected:
379 uintptr_t m_argDesc;
380 AvmCore* m_core;
382 public:
383 ImmArgDescIter(uintptr_t argDesc, AvmCore* core) : m_argDesc(argDesc), m_core(core)
387 // iterates over the types in the argument descriptor
388 // VOID signals end of arguments; it is invalid to call nextType again after
389 // end of arguments!
390 Traits* nextType()
392 return argTraitsFromType(m_core, nextTypeKind());
// Consume and return the next 4-bit kind code (from the top of the word down).
395 unsigned nextTypeKind()
397 unsigned type = (unsigned)(m_argDesc >> (sizeof(m_argDesc) * 8 - 4));
398 m_argDesc <<= 4;
399 return type;
402 bool hasRest()
404 return false;
407 bool needArguments()
409 return false;
412 bool needOptionalArgs()
414 return true;
417 bool isVarArg()
419 return false;
423 // read types out of an unsigned char*
// Like ImmArgDescIter but streams 4-bit kind codes from a byte buffer
// (high nibble of each byte first), terminated by kVOID.
424 class PtrArgDescIter
426 protected:
427 // pointer to next byte of arg description
428 unsigned char* m_p;
429 // currently loaded bits of arg description
430 unsigned char m_bitBuf;
431 // number of valid bits in m_bitBuf
432 unsigned char m_bits;
433 AvmCore* m_core;
435 public:
436 PtrArgDescIter(void* argDesc, AvmCore* core) : m_p((unsigned char* )argDesc), m_bitBuf(0), m_bits(0), m_core(core)
440 // iterates over the types in the argument descriptor
441 // VOID signals end of arguments; it is invalid to call nextType again after
442 // end of arguments!
443 Traits* nextType()
445 return argTraitsFromType(m_core, nextTypeKind());
// Refill the bit buffer from the byte stream as needed, then consume the
// top nibble.
448 unsigned nextTypeKind()
450 if (m_bits == 0)
452 m_bitBuf = *m_p++;
453 m_bits = 8;
456 unsigned type = m_bitBuf >> 4;
458 m_bitBuf <<= 4;
459 m_bits -= 4;
460 return type;
463 bool hasRest()
465 return false;
468 bool needArguments()
470 return false;
473 bool needOptionalArgs()
475 return true;
478 bool isVarArg()
480 return false;
484 // lay out an argument description
485 // on x86-32, an argument description is exactly a stack layout
486 // on ARM, the first 4 words are r0-r3 and the rest is exactly a stack layout
487 // on x86-64, it will no doubt be more complicated...
// Bump-allocator over a coerced-argument buffer. Passing dst == NULL measures
// the required size (argEnd() then returns the byte count as a pointer value).
// Slot sizes are hard-coded for 32-bit targets (4-byte ints/pointers).
488 class ArgDescLayout
490 protected:
491 void* m_dst;
492 #ifdef AVMPLUS_ARM
// ARM: a1-a4 always occupy the first 16 bytes, even for shorter arg lists.
493 void* m_minDst;
494 #endif
496 public:
497 // NULL dst is legal for measuring a call layout
498 ArgDescLayout(void* dst) : m_dst(dst)
499 #ifdef AVMPLUS_ARM
500 , m_minDst((void*)((uintptr_t)dst + 16)) // must make room for a1-a4 always
501 #endif
// Buffer must be 8-byte (ARM) / 16-byte (x86) aligned per the platform ABI.
503 #ifdef AVMPLUS_ARM
504 AvmAssert(!(7 & (uintptr_t)dst));
505 #else
506 AvmAssert(!(15 & (uintptr_t)dst));
507 #endif
// One past the last slot written; with dst == NULL this is the total size.
510 void* argEnd()
512 #ifdef AVMPLUS_ARM
513 return (m_dst > m_minDst) ? m_dst : m_minDst;
514 #else
515 return m_dst;
516 #endif
519 // TODO really just platform int? that's what AtomMethodProc style uses
// Reserve a 4-byte integer slot and return its address.
520 int32_t* int32Arg()
522 int32_t* result = (int32_t*)m_dst;
523 m_dst = (void*)(4 + (uintptr_t)m_dst);
524 return result;
// Reserve an 8-byte double slot and return its address.
527 double* doubleArg()
529 // TODO: doubles on ARM may need to be aligned, but because we have only
530 // two arg sizes (4 and 8 bytes) it won't affect us right now
531 double* result = (double* )m_dst;
532 m_dst = (void*)(8 + (uintptr_t)m_dst);
533 return result;
// Reserve a 4-byte pointer slot and return its address.
536 void** ptrArg()
538 void** result = (void**)m_dst;
539 m_dst = (void*)(4 + (uintptr_t)m_dst);
540 return result;
544 // calculate the size required for the given set of argument types for a cdecl call
545 // including MethodEnv
// Dry-run of the cdecl layout using a NULL-based ArgDescLayout: one slot per
// declared arg, plus (argv, argc, rest) when the callee takes rest/arguments,
// plus the trailing MethodEnv*. Returns the byte count.
546 template <class ARG_TYPE_ITER> static int32_t argDescSize(ARG_TYPE_ITER calleeTypeIter, AvmCore*)
548 ArgDescLayout l(NULL);
550 for (;;)
552 Traits* t = calleeTypeIter.nextType();
554 BuiltinType bt = Traits::getBuiltinType(t);
// VOID_TYPE is the iterator's end sentinel.
556 if (bt == BUILTIN_void) break;
558 switch(Traits::getBuiltinType(t))
560 case BUILTIN_int:
561 case BUILTIN_uint:
562 case BUILTIN_boolean:
563 l.int32Arg();
564 break;
565 case BUILTIN_number:
566 l.doubleArg();
567 break;
568 default:
569 l.ptrArg();
570 break;
573 if (calleeTypeIter.needArguments() || calleeTypeIter.hasRest())
575 l.ptrArg(); // argv
576 l.int32Arg(); // argc
577 l.ptrArg(); // rest
579 l.ptrArg(); // MethodEnv
580 return (int32_t)(uintptr_t)l.argEnd();
583 // calculate the size required for the given set of argument types passed "ap" style
584 // just figures out the size of the ap struct... doesn't include MethodEnv
// Sizes the packed "ap" buffer: declared params at their native (unaligned)
// sizes, then one pointer-sized Atom per extra caller argument. Stops early if
// the caller supplies fewer args than the callee declares.
585 template <class ARG_TYPE_ITER1, class ARG_TYPE_ITER2> static int32_t apArgDescSize(ARG_TYPE_ITER1 callerTypeIter, ARG_TYPE_ITER2 calleeTypeIter, AvmCore* core)
587 int32_t size = 0;
589 // count "regular" arguments... stored as unaligned native
590 for (;;)
592 Traits* t = calleeTypeIter.nextType();
594 BuiltinType bt = Traits::getBuiltinType(t);
596 // no more "regular" arguments? break out
597 if (bt == BUILTIN_void) break;
599 // no more caller arguments? done
600 if (callerTypeIter.nextType() == VOID_TYPE)
601 return size;
603 switch(Traits::getBuiltinType(t))
605 case BUILTIN_int:
606 case BUILTIN_uint:
607 case BUILTIN_boolean:
608 size += sizeof(int32_t);
609 break;
610 case BUILTIN_number:
611 size += sizeof(double);
612 break;
613 default:
614 size += sizeof(void*);
615 break;
618 // count the rest as pointers (Atoms)
619 while (callerTypeIter.nextType() != VOID_TYPE)
620 size += sizeof(void*);
621 return size;
// Pseudo-portable inline-assembly shims: ASM_FUNC_BEGIN/END bracket a naked
// function, ASM1/ASM2 emit instructions, ASM_CALL/ASM_REDIR call or tail-jump.
// MSVC uses real naked functions; the gcc paths are development-only hacks
// (production gcc targets are expected to use .s files instead).
624 #if defined(AVMPLUS_IA32)
625 // pseudo-cross-compiler macros for inline assembly...
626 // gcc/x86 in particular isn't really production grade
627 #ifdef _WIN32
628 // msvc stuff works well inline...
629 #define ASM_FUNC_BEGIN(R, N, A) static R __declspec(naked) N A {
630 #define ASM_FUNC_END(N) }
631 #define ASM1(X) __asm { X }
632 #define ASM2(X,Y) __asm { X,Y }
633 #define ASM_CALL(X) __asm { call X }
634 #define ASM_REDIR(F) __asm { jmp F }
635 #else
636 // gcc doesn't support naked inline functions despite the clear use for them
637 // this stuff is hackery to help development -- gcc targets should have .s files for production
// gcc/x86: the "function" is a labeled asm body inside a container function;
// N becomes a function pointer initialized to that label's address.
638 #define ASM_FUNC_BEGIN(R, N, A) typedef R (* N##_type)A; static void* N##_container () { \
639 void* result; \
640 __asm__ (" mov $L"#N"_astart, %[result]" : [result] "=r" (result)); \
641 goto ret; asmlbl: \
642 __asm__ (" .intel_syntax noprefix "); \
643 __asm__ ("L"#N"_astart: ");
644 #define ASM_FUNC_END(N) __asm__ (" .att_syntax noprefix "); ret: if (result == 0) goto asmlbl; return result; } N##_type N = (N##_type)N##_container();
645 #define ASM1(X) __asm__ ( #X );
646 #define ASM2(X,Y) __asm__ ( #X","#Y );
647 #define ASM_CALL(X) __asm__ ("call _"#X"\n");
648 #define ASM_REDIR(F) __asm__ ( "push [_"#F"]\n ret");
649 #endif
650 #elif defined(AVMPLUS_ARM)
651 // gcc doesn't support naked inline functions despite the clear use for them
652 // this stuff is hackery to help development -- gcc targets should have .s files for production
// ARM debug breakpoint helper: preserves a1-a4 around swi 0x80.
653 #define ASM_BP __asm__("stmdb sp!, {a1, a2, a3, a4}\n swi 0x80\n ldmia sp!, {a1, a2, a3, a4}")
654 #define ASM_FUNC_BEGIN(R, N, A) \
655 extern R N A; \
656 __asm__(".section __TEXT,__text,regular,pure_instructions"); \
657 __asm__(".align 2"); \
658 __asm__(".globl _"#N" "); \
659 __asm__("_"#N": ");
660 #define ASM_FUNC_END(N)
661 #define ASM_REDIR(F) __asm__ ( "b _"#F" ");
663 #endif
665 // prototype for an argument coercer
666 // env is MethodEnv coercing for
667 // callerArgDesc is an opaque description of the arguments to be found via callerAp
668 // callerAp is the variadic args passed to coerce32CdeclImmArgDesc, et al
669 // calleeArgDescBuf is the argument buffer into which coerced arguments are written
670 // returns a pointer to a function used for return value conversion or NULL if none is needed
671 typedef void* (*ArgCoercer)(void* callee, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp, void* calleeArgDescBuf);
673 extern "C" {
675 // core of the thunking mechanism...
676 // doesn't necessarily return an int32_t... the N/double variant is identical
677 // -- callee is the cdecl method to invoke
678 // -- calleeArgDescBufSize is the amount of space to allocate for the coerced args (and possibly register alloc info for, say, x86/64)
679 // -- argCoercer is a callback that does the actual filling of the coerced argument buf
680 // -- env is the MethodEnv we're calling (used to get argument traits -- does not make callee redundant as env may have >1 impl)
681 // -- callerArgDesc is an opaque description of the types of variadic arguments in callerAp
682 // -- callerAp represents the arguments to coerce
// Flow (both ports): carve an aligned arg buffer off the stack, run argCoercer
// to fill it, call callee on that buffer, then run the returned return-coercer
// (if non-NULL) on the raw result. Hand-written asm -- do not reorder.
683 ASM_FUNC_BEGIN(Atom, coerce32CdeclShim,
684 (void* callee, unsigned calleeArgDescBufSize, ArgCoercer argCoercer, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp))
685 #ifdef AVMPLUS_ARM
686 __asm__("stmdb sp!, {v1, v2, v3, v7, lr}");
687 __asm__("mov v7, sp");
688 // a1 = callee
689 // a2 = calleeArgDescBufSize
690 // a3 = argCoercer
691 // a4 = env
692 // [v7] = retTraits
693 // [v7, #4] = callerArgDesc
694 // [v7, #8] = callerAp
695 __asm__("sub sp, sp, a2"); // make room for args
696 __asm__("and sp, sp, #-8"); // double word align
697 __asm__("mov v2, a3"); // save off argCoercer
698 __asm__("mov v3, a4"); // save off env
700 // a1 stays callee
701 __asm__("mov a2, a4"); // pass env
702 __asm__("ldr a3, [v7, #20]"); // pass retTraits
703 __asm__("ldr a4, [v7, #24]"); // pass callerArgDesc
704 __asm__("mov v1, sp"); // pass calleeArgDescBuf
705 __asm__("stmdb sp!, {v1}");
706 __asm__("ldr v1, [v7, #28]"); // pass callerAp
707 __asm__("stmdb sp!, {v1}");
708 __asm__("mov v1, a1"); // save off callee
709 __asm__("bl _via_v2"); // call coercer!
710 __asm__("add sp, sp, #8"); // restore stack
711 __asm__("mov v2, a1"); // remember returnCoercer
712 __asm__("ldmia sp!, {a1, a2, a3, a4}"); // move first 4 arg words into registers
713 __asm__("bl _via_v1"); // call the implementation!
714 __asm__("mov sp, v7"); // restore stack
715 __asm__("cmp v2, #0"); // maybe call returnCoercer -- a1 and a2 will be the double or a1 will be the 32 and a2 will be a dummy
716 __asm__("ldrne a3, [v7, #20]"); // retTraits
717 __asm__("movne a4, v3"); // env
718 __asm__("blne _via_v2");
719 __asm__("ldmia sp!, {v1, v2, v3, v7, pc}"); // done!
720 __asm__("_via_v1: bx v1");
721 __asm__("_via_v2: bx v2");
722 #else
723 #ifdef _WIN32
// Silence unreferenced-parameter warnings; the naked body reads them via ebp.
724 (void)callee;
725 (void)calleeArgDescBufSize;
726 (void)argCoercer;
727 (void)env;
728 (void)retTraits;
729 (void)callerArgDesc;
730 (void)callerAp;
731 #endif
732 ASM1( push ebp)
733 ASM2( mov ebp, esp)
734 ASM1( push esi)
735 // [ebp] = saved ebp
736 // [ebp+4] = return address
737 // [ebp+8] = callee
738 // [ebp+12] = calleeArgDescBufSize
739 // [ebp+16] = argCoercer
740 // [ebp+20] = env
741 // [ebp+24] = retTraits
742 // [ebp+28] = callerArgDesc
743 // [ebp+32] = callerAp
744 ASM2( sub esp, [ebp+12] ) // make room for args
745 ASM2( and esp, 0xfffffff0 ) // 16 byte aligned
746 ASM2( mov eax, esp)
747 ASM2( sub esp, 8 ) // 16 byte aligned for call
748 ASM1( push eax ) // calleeArgDescBuf
749 ASM1( push [ebp+32] ) // callerAp
750 ASM1( push [ebp+28] ) // callerArgDesc
751 ASM1( push [ebp+24] ) // retTraits
752 ASM1( push [ebp+20] ) // env
753 ASM1( push [ebp+8] ) // callee
754 ASM1( call [ebp+16] ) // map args: argCoercer(callee, env, retTraits, callerArgDesc, callerAp, callerArgDescBuf);
755 ASM2( add esp, 32)
756 ASM2( mov esi, eax ) // save result mapping func
757 ASM1( call [ebp+8] ) // call method
758 ASM2( lea esp, [ebp-4]) // restore stack
759 ASM2( cmp esi, 0)
760 ASM1( je coerce32CdeclShim_done)
761 ASM2( sub esp, 8 ) // 16 byte aligned for call
762 ASM1( push [ebp+20] ) // env
763 ASM1( push [ebp+24] ) // retTraits
764 ASM1( call esi ) // map return value: retCoercer(retTraits, env)
765 ASM2( add esp, 16)
766 ASM1(coerce32CdeclShim_done:)
767 ASM1( pop esi)
768 ASM1( pop ebp)
769 ASM1( ret)
770 #endif
771 ASM_FUNC_END(coerce32CdeclShim)
// Double-returning variant: identical machine code, so it tail-jumps to the
// 32-bit shim; only the declared return type differs.
773 ASM_FUNC_BEGIN(double, coerceNCdeclShim, (void* callee,
774 unsigned calleeArgDescBufSize, ArgCoercer argCoercer, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp))
775 #ifdef _WIN32
776 (void)callee;
777 (void)calleeArgDescBufSize;
778 (void)argCoercer;
779 (void)env;
780 (void)retTraits;
781 (void)callerArgDesc;
782 (void)callerAp;
783 #endif
784 ASM_REDIR(coerce32CdeclShim) // exact same impl
785 ASM_FUNC_END(coerceNCdeclShim)
787 // Number => something
// C++ half of the Number-return path: box the raw double, then coerce to the
// caller's expected return representation.
788 Atom returnCoercerNImpl(double n, Traits* retTraits, MethodEnv* env)
790 AvmValue v;
792 v.setDouble(n);
793 return v.get(env->toplevel(), retTraits);
796 // Number => some 32
// Asm stub invoked by the shim after callee returns: spills the x87/FP result
// to the stack (x86) or forwards registers (ARM) into returnCoercerNImpl.
797 ASM_FUNC_BEGIN(Atom, returnCoercerN, (Traits* retTraits, MethodEnv* env))
798 #ifdef AVMPLUS_ARM
799 __asm__("b _returnCoercerNImpl"); // straight through
800 #else
801 ASM1( push ebp) // this is necessary to keep pthreads happy!
802 ASM2( mov ebp, esp)
803 ASM2( sub esp, 12)
804 ASM1( push [ebp+12]) // env
805 ASM1( push [ebp+8]) // retTraits
806 ASM2( sub esp, 8)
807 ASM1( fstp qword ptr [esp]) // callee will have left a value on the FP stack
808 ASM_CALL(returnCoercerNImpl)
809 ASM2( add esp, 28)
810 ASM1( pop ebp)
811 ASM1( ret)
812 #endif
813 ASM_FUNC_END(returnCoercerN)
815 #ifndef AVMPLUS_ARM
// Caller wants void but callee returned a double: just pop the x87 stack so it
// doesn't leak; x86-only (ARM has no FP stack to balance).
816 ASM_FUNC_BEGIN(Atom, returnCoercerNPop, (Traits* retTraits, MethodEnv* env))
817 ASM1( push ebp) // this is necessary to keep pthreads happy!
818 ASM2( mov ebp, esp)
819 ASM1( fstp st(0)) // callee will have left a value on the FP stack
820 ASM1( pop ebp)
821 ASM1( ret)
822 ASM_FUNC_END(returnCoercerNPop)
823 #endif
825 // something => Number
// Box the callee's raw 32-bit result using its declared return traits, then
// read it back out as a double.
826 double returnCoercerN32Impl(Atom a, Traits* /*retTraits*/, MethodEnv* env)
828 Traits* calleeRT = env->method->getMethodSignature()->returnTraits();
829 AvmValue v;
831 v.set(a, calleeRT);
833 return v.getDouble();
836 // some 32 => Number
837 ASM_FUNC_BEGIN(double, returnCoercerN32, (Traits* retTraits, MethodEnv* env))
838 #ifdef AVMPLUS_ARM
839 __asm__("mov a2, a3"); // a2 is a dummy
840 __asm__("mov a3, a4");
841 __asm__("b _returnCoercerN32Impl");
842 #else
843 ASM1( push ebp) // this is necessary to keep pthreads happy!
844 ASM2( mov ebp, esp)
845 ASM1( push [ebp+12]) // env
846 ASM1( push [ebp+8]) // retTraits
847 ASM1( push eax) // eax holds the callee's raw 32-bit result
848 ASM_CALL(returnCoercerN32Impl) // will push something on the FP stack
849 ASM2( add esp, 12)
850 ASM1( pop ebp)
851 ASM1( ret)
852 #endif
853 ASM_FUNC_END(returnCoercerN32)
855 // something => something
// General 32-bit-to-32-bit return conversion via AvmValue.
856 Atom returnCoercer32Impl(Atom a, Traits* retTraits, MethodEnv* env)
858 Traits* calleeRT = env->method->getMethodSignature()->returnTraits();
859 AvmValue v;
861 v.set(a, calleeRT);
862 return v.get(env->toplevel(), retTraits);
865 // some 32 => some 32
866 ASM_FUNC_BEGIN(Atom, returnCoercer32, (Traits* retTraits, MethodEnv* env))
867 #ifdef AVMPLUS_ARM
868 __asm__("mov a2, a3"); // a2 is a dummy
869 __asm__("mov a3, a4");
870 __asm__("b _returnCoercer32Impl");
871 #else
872 ASM1( push ebp) // this is necessary to keep pthreads happy!
873 ASM2( mov ebp, esp)
874 ASM1( push [ebp+12]) // env
875 ASM1( push [ebp+8]) // retTraits
876 ASM1( push eax) // eax holds the callee's raw 32-bit result
877 ASM_CALL(returnCoercer32Impl)
878 ASM2( add esp, 12)
879 ASM1( pop ebp)
880 ASM1( ret)
881 #endif
882 ASM_FUNC_END(returnCoercer32)
886 // returns any function required to coerce the callee's return type to the
887 // caller's desired return type
// Selects one of the asm stubs above (or NULL when no conversion is needed)
// based on the callee/caller return-traits pair.
888 static void* returnCoercer(AvmCore* core, Traits* calleeRT, Traits* callerRT)
890 // same or caller will discard? no conversion
891 if (callerRT == calleeRT)
892 return NULL;
893 if (callerRT == VOID_TYPE)
895 #ifndef AVMPLUS_ARM
// x86 only: a discarded double must still be popped off the x87 stack.
896 if (calleeRT == NUMBER_TYPE)
897 return (void*)returnCoercerNPop;
898 else
899 #endif
900 return NULL;
902 // both integral types? no conversion
903 if ((callerRT == INT_TYPE || callerRT == UINT_TYPE) &&
904 (calleeRT == INT_TYPE || calleeRT == UINT_TYPE))
905 return NULL;
906 // is callee a double returner?
907 if (calleeRT == NUMBER_TYPE)
908 return (void*)returnCoercerN; // Number => 32
909 // how about caller?
910 if (callerRT == NUMBER_TYPE)
911 return (void*)returnCoercerN32; // 32 => Number
912 // everything else
913 return (void*)returnCoercer32; // 32 => 32
// Overloaded cursors for reading one raw argument from either a va_list or a
// packed "ap" buffer; both advance their cursor past the value read.
916 static int32_t arg32(va_list& ap)
918 return va_arg(ap, int32_t); // x86-64?
921 static int32_t arg32(APType& ap)
923 int32_t result = *(int32_t *)ap;
924 ap = (APType)((uintptr_t)ap + sizeof(int32_t)); // x86-64?
925 return result;
928 static double argN(va_list& ap)
930 return va_arg(ap, double);
933 static double argN(APType& ap)
935 double result = *(double*)ap;
936 ap = (APType)((uintptr_t)ap + sizeof(double));
937 return result;
// Read one argument of type callerT from the cursor and box it as an untyped
// Atom ("*"), consuming the argument (doubles read 8 bytes, everything else 4).
940 template <class ARG_ITER> static Atom coerceArgToAny(Toplevel* toplevel, ARG_ITER& ap, Traits* callerT)
// NOTE(review): `core` appears unused here except presumably via the
// NUMBER_TYPE macro expansion below.
942 AvmCore* core = toplevel->core();
943 AvmValue v;
945 if (callerT == NUMBER_TYPE)
946 v.setDouble(argN(ap));
947 else
948 v.set((Atom)arg32(ap), callerT);
949 return v.get(toplevel, NULL);
952 // coerces a single argument and writes it to an argument desc layout
// Four cases: exact-match fast path (straight copy), 32 => double,
// double => 32, and general 32 => 32 via AvmValue coercion.
953 template <class ARG_ITER> static void coerceArg(Toplevel* toplevel, ArgDescLayout& l, Traits* calleeT, ARG_ITER& callerAp, Traits* callerT)
955 AvmCore* core = toplevel->core();
957 if (calleeT == callerT && calleeT != OBJECT_TYPE) // OBJECT_TYPE might be a naked ScriptObject... let AvmValue handle it
959 if (calleeT == NUMBER_TYPE)
960 *l.doubleArg() = argN(callerAp);
961 else
962 *l.int32Arg() = arg32(callerAp);
964 else if (calleeT == NUMBER_TYPE)
966 AvmValue v;
967 v.set((Atom)arg32(callerAp), callerT);
968 *l.doubleArg() = v.getDouble();
970 else if (callerT == NUMBER_TYPE)
972 AvmValue v;
973 v.setDouble(argN(callerAp));
974 *l.int32Arg() = v.get(toplevel, calleeT);
976 else
978 AvmValue v;
979 v.set((Atom)arg32(callerAp), callerT);
980 *l.int32Arg() = v.get(toplevel, calleeT);
// Coerce a single already-boxed Atom into the layout by treating its address
// as a one-element "ap" buffer (callerT == NULL means "already an Atom").
984 static void coerceArgAtom(Toplevel* toplevel, ArgDescLayout& l, Traits* calleeT, Atom a)
986 APType ap = (APType)&a;
987 coerceArg(toplevel, l, calleeT, ap, NULL);
990 // coerces a single argument and writes it into an AtomList
// Variant of coerceArg targeting an AtomList sink: reads one va_list argument
// of type callerT, coerces to calleeT, and appends the resulting Atom.
991 static void coerceArg(Toplevel* toplevel, AtomList &atoms, Traits* calleeT, va_list& callerAp, Traits* callerT)
// NOTE(review): `core` appears unused here except presumably via the
// NUMBER_TYPE macro expansion below.
993 AvmCore* core = toplevel->core();
995 if (callerT == NUMBER_TYPE)
997 AvmValue v;
998 v.setDouble(argN(callerAp));
999 atoms.add((Atom)v.get(toplevel, calleeT));
1001 else
1003 AvmValue v;
1004 v.set((Atom)arg32(callerAp), callerT);
1005 atoms.add((Atom)v.get(toplevel, calleeT));
// Already-boxed Atom goes straight into the list; no coercion needed.
1009 static void coerceArgAtom(Toplevel*, AtomList &atoms, Traits* /*calleeT*/, Atom a)
1011 atoms.add(a);
1014 // coerces a single argument and writes it to an "ap" style arg list
// Same four-case logic as the ArgDescLayout variant, but writes packed
// (unaligned-native) values and advances the destination ap cursor itself.
1015 static void coerceArg(Toplevel* toplevel, APType& ap, Traits* calleeT, va_list& callerAp, Traits* callerT)
1017 AvmCore* core = toplevel->core();
1019 if (calleeT == callerT && calleeT != OBJECT_TYPE) // OBJECT_TYPE might be a naked ScriptObject... let AvmValue handle it
1021 if (calleeT == NUMBER_TYPE)
1023 *(double* )ap = argN(callerAp);
1024 ap = (APType)(sizeof(double) + (uintptr_t)ap);
1026 else
1028 *(int32_t*)ap = arg32(callerAp);
1029 ap = (APType)(sizeof(int32_t) + (uintptr_t)ap);
1032 else if (calleeT == NUMBER_TYPE)
1034 AvmValue v;
1035 v.set((Atom)arg32(callerAp), callerT);
1036 *(double* )ap = v.getDouble();
1037 ap = (APType)(sizeof(double) + (uintptr_t)ap);
1039 else if (callerT == NUMBER_TYPE)
1041 AvmValue v;
1042 v.setDouble(argN(callerAp));
1043 *(int32_t*)ap = v.get(toplevel, calleeT);
1044 ap = (APType)(sizeof(int32_t) + (uintptr_t)ap);
1046 else
1048 AvmValue v;
1049 v.set((Atom)arg32(callerAp), callerT);
1050 *(int32_t*)ap = v.get(toplevel, calleeT);
1051 ap = (APType)(sizeof(int32_t) + (uintptr_t)ap);
// Adapter: the ap-sink coerceArg wants a va_list source, so push the Atom
// through real varargs to obtain one.
1055 static void coerceArgAtomI(Toplevel* toplevel, APType& ap, Traits* calleeT, ...)
1057 va_list va;
1059 va_start(va, calleeT);
1060 coerceArg(toplevel, ap, calleeT, va, NULL);
1061 va_end(va);
1064 static void coerceArgAtom(Toplevel* toplevel, APType& ap, Traits* calleeT, Atom a)
1066 coerceArgAtomI(toplevel, ap, calleeT, a);
// Append the trailing (argv, argc, rest) triple for callees that declared
// ...rest or "arguments". Only the cdecl-layout sink supports this; the
// AP/AtomList sinks assert because their calling conventions never reach here.
1069 static void handleRest(Toplevel*, ArgDescLayout& l, ArrayObject *rest)
1071 uint32_t argc = rest->getDenseLength();
1072 Atom *argv = rest->getDenseCopy();
1074 *l.ptrArg() = argv; // TODO argv
1075 *l.int32Arg() = argc; // TODO argc
1076 *l.ptrArg() = rest; // rest
1079 static void handleRest(Toplevel*, APType&, ArrayObject*)
1081 AvmAssert(false); // AP doesn't handle rest in the CC
1084 static void handleRest(Toplevel*, AtomList&, ArrayObject*)
1086 AvmAssert(false);
1089 // coerces a set of arguments and writes to a given argument description
// Main coercion driver: walks caller args against callee param types, writing
// each coerced value via argDescWriter (a layout, ap buffer, or AtomList).
// Also materializes "arguments"/rest arrays, fills optional-parameter
// defaults, and throws kWrongArgumentCountError on arity mismatch.
// Returns the number of caller arguments consumed.
1090 template <class ARG_TYPE_ITER1, class ARG_ITER, class ARG_TYPE_ITER2, class ARG_WRITER>
1091 static int32_t argCoerceLoop(MethodEnv* env, ARG_TYPE_ITER1 callerTypeIter, ARG_ITER callerAp,
1092 ARG_TYPE_ITER2 calleeTypeIter, ARG_WRITER &argDescWriter)
1094 Toplevel* toplevel = env->toplevel();
1095 MethodInfo* info = env->method;
1096 AvmCore* core = env->core();
1097 int32_t argc = 0;
1099 // map args
1100 ArrayObject* argsOrRest = NULL;
1101 ArrayObject* args = NULL; // "arguments"
1102 const MethodSignature* ms = env->method->getMethodSignature();
1104 // "arguments" captures all arguments
1105 if (calleeTypeIter.needArguments())
// Peek (via copies of both iterators) at the first caller arg -- presumably
// the receiver -- to seed createArguments without consuming it.
1107 ARG_TYPE_ITER1 callerTypeIterTemp = callerTypeIter;
1108 ARG_ITER callerApTemp = callerAp;
1109 Traits* callerT = callerTypeIterTemp.nextType();
1111 AvmAssert(callerT != VOID_TYPE);
1112 Atom a = coerceArgToAny(toplevel, callerApTemp, callerT);
1113 args = env->createArguments(&a, 0);
1116 for (;;)
1118 Traits* callerT = callerTypeIter.nextType();
1120 #if CDECL_VERBOSE
1121 if (callerT)
1122 core->console << " callerT: " << callerT->formatClassName() << "\n";
1123 else
1124 core->console << " callerT: *\n";
1125 #endif
1126 // no more params from caller? break out
1127 if (callerT == VOID_TYPE)
1128 break;
1130 Traits* calleeT = calleeTypeIter.nextType();
1132 #if CDECL_VERBOSE
1133 if (calleeT)
1134 core->console << " calleeT: " << calleeT->formatClassName() << "\n";
1135 else
1136 core->console << " calleeT: *\n";
1137 #endif
1139 // no more normal params for callee
1140 if (calleeT == VOID_TYPE)
1142 if (!ms->allowExtraArgs())
1144 toplevel->argumentErrorClass()->throwError(kWrongArgumentCountError,
1145 core->toErrorString(info),
1146 core->toErrorString(ms->requiredParamCount()),
1147 core->toErrorString(argc));
1149 else // fill up rest/argument/var args
1151 // can we just keep pushing args? (i.e., "ap" style)
1152 if (calleeTypeIter.isVarArg())
1154 #if CDECL_VERBOSE
1155 core->console << " argCoerceLoop: passing extra params as vararg\n";
1156 #endif
1157 AvmAssert(!argsOrRest); // shouldn't have rest or arguments if vararg
1160 // just keep writing "atom"s
// NOTE(review): this reads as the body of a do { ... } while loop whose
// opening line was lost in the scrape.
1161 coerceArg(toplevel, argDescWriter, NULL, callerAp, callerT);
1162 callerT = callerTypeIter.nextType();
1163 argc++;
1164 } while (callerT != VOID_TYPE);
1166 else
1168 // are we actually using the args?
1169 if (calleeTypeIter.needArguments() || calleeTypeIter.hasRest())
1171 #if CDECL_VERBOSE
1172 core->console << " argCoerceLoop: passing extra params as Array\n";
1173 #endif
1174 if (!argsOrRest)
1175 argsOrRest = args ? args : toplevel->arrayClass()->newArray(1);
1178 Atom a = coerceArgToAny(toplevel, callerAp, callerT);
1179 argsOrRest->push(&a, 1);
1180 callerT = callerTypeIter.nextType();
1181 } while (callerT != VOID_TYPE);
1183 #if CDECL_VERBOSE
1184 else
1185 core->console << " argCoerceLoop: discarding extra params\n";
1186 #endif
1188 break;
1192 // copy arg into "arguments"
1193 if (argsOrRest)
// Use a copy of the cursor so the Atom-boxing read doesn't consume the arg
// that coerceArg below still needs.
1195 ARG_ITER ap = callerAp;
1196 Atom a = coerceArgToAny(toplevel, ap, callerT);
1197 argsOrRest->push(&a, 1);
1199 else if (args) // arguments doesn't take "this" so
1200 argsOrRest = args;
1201 coerceArg(toplevel, argDescWriter, calleeT, callerAp, callerT);
1202 argc++;
1205 if (calleeTypeIter.needOptionalArgs())
1207 // deal with "optional" args
1208 int32_t regArgs = ms->param_count() + 1;
1209 int optNum = argc - regArgs + ms->optional_count();
1211 if (optNum < 0) // not enough non-optional arguments passed...
1212 toplevel->argumentErrorClass()->throwError(kWrongArgumentCountError,
1213 core->toErrorString(info),
1214 core->toErrorString(ms->requiredParamCount()),
1215 core->toErrorString(argc));
1217 while (argc < regArgs) // optional...
1219 Traits* calleeT = calleeTypeIter.nextType();
1220 coerceArgAtom(toplevel, argDescWriter, calleeT, ms->getDefaultValue(optNum++));
1221 argc++;
1224 // pass ArrayObject through
1225 if (calleeTypeIter.needArguments() || calleeTypeIter.hasRest())
1227 if (!argsOrRest)
1228 argsOrRest = args ? args : toplevel->arrayClass()->newArray(0);
1229 handleRest(toplevel, argDescWriter, argsOrRest);
1231 return argc;
1234 static void passBaseArgs(MethodEnv* env, va_list& callerAp, ArgDescLayout& l)
1236 (void)callerAp;
1237 (void)l;
1238 (void)env;
1241 static void passBaseArgs(MethodEnv* env, APType& callerAp, ArgDescLayout& l)
1243 (void)callerAp;
1244 (void)l;
1245 (void)env;
1248 static void passTailArgs(MethodEnv* env, va_list& callerAp, ArgDescLayout& l)
1250 (void)callerAp;
1251 (void)l;
1253 // pass MethodEnv at the end
1254 *l.ptrArg() = env;
1257 static void passTailArgs(MethodEnv* env, APType& callerAp, ArgDescLayout& l)
1259 (void)callerAp;
1260 (void)l;
1262 // pass MethodEnv at the end
1263 *l.ptrArg() = env;
// coerces a set of variadic arguments to a cdecl arg description
// ARG_TYPE_ITER yields the caller's argument types; AP_TYPE is the caller's
// argument cursor (va_list or APType), advanced as a side effect.
// callerRT is the caller's expected return traits (NULL/* means any);
// calleeArgDescBuf receives the coerced cdecl-style argument block.
// Returns a coercer for the return value (see returnCoercer), or whatever
// returnCoercer yields when no conversion is needed.
template <class ARG_TYPE_ITER, class AP_TYPE> static void* argCoercer(void* /*callee*/, MethodEnv* env, Traits* callerRT, ARG_TYPE_ITER callerTypeIter, AP_TYPE callerAp, void* calleeArgDescBuf)
{
    // layout writer positioned at the start of the callee's argument buffer
    ArgDescLayout l(calleeArgDescBuf);

    // write any leading arguments (a no-op for the flavors defined above);
    // must run before the coercion loop so the layout cursor is correct
    passBaseArgs(env, callerAp, l);

    MethodInfo* info = env->method;
    MethodSigArgDescIter calleeTypeIter(info);
    Traits* calleeRT = info->getMethodSignature()->returnTraits();
    AvmCore* core = info->pool()->core;
    // coerce each caller argument to the callee's declared type, consuming
    // callerAp as it goes
    argCoerceLoop(env, callerTypeIter, callerAp, calleeTypeIter, l);
#if CDECL_VERBOSE
    core->console << "argCoercer: " << info->format(core) << " ";
    if (callerRT)
        core->console << callerRT->format(core);
    else
        core->console << "*";
    core->console << " -> ";
    if (calleeRT)
        core->console << calleeRT->format(core);
    else
        core->console << "*";
    core->console << "\n";
#endif
    // append trailing arguments (the MethodEnv pointer) after the coerced args
    passTailArgs(env, callerAp, l);
    // return any return type Coercer
    return returnCoercer(core, calleeRT, callerRT);
}
// callerArgDesc is the number of atoms
// Coercer entry point for an Atom-vector caller: wraps the atom count in an
// AtomvArgDescIter and defers to the generic argCoercer.
static void* atomvArgDescCoercer(void* callee, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp, void* calleeArgDescBuf)
{
    AtomvArgDescIter callerTypeIter(env->core(), (int32_t)callerArgDesc);

    // deliberately disabled pending verification — do not remove without
    // resolving the suspected off-by-one in AtomvArgDescIter
    AvmAssert(false); // TESTME -- AtomvArgDescIter off by one error or not
    return argCoercer(callee, env, retTraits, callerTypeIter, (APType)callerAp, calleeArgDescBuf);
}
1306 // callerArgDesc is the number of args
1307 static void* apArgDescCoercer(void* callee, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp, void* calleeArgDescBuf)
1309 APArgDescIter callerTypeIter((int32_t)callerArgDesc, env->method);
1311 return argCoercer(callee, env, retTraits, callerTypeIter, (APType)callerAp, calleeArgDescBuf);
1314 // callerArgDesc is a pointer to a string of nybbles describing arg types
1315 static void* ptrArgDescCoercer(void* callee, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp, void* calleeArgDescBuf)
1317 PtrArgDescIter callerTypeIter((void*)callerArgDesc, env->core());
1319 return argCoercer(callee, env, retTraits, callerTypeIter, *(va_list*)callerAp, calleeArgDescBuf);
1322 // callerArgDesc is a value containing nybbles describing arg types
1323 static void* immArgDescCoercer(void* callee, MethodEnv* env, Traits* retTraits, uintptr_t callerArgDesc, void* callerAp, void* calleeArgDescBuf)
1325 ImmArgDescIter callerTypeIter(callerArgDesc, env->core());
1327 return argCoercer(callee, env, retTraits, callerTypeIter, *(va_list*)callerAp, calleeArgDescBuf);
1330 // amount of stack space needed to call the given method cdecl style
1331 static int32_t argDescSize(MethodInfo* info)
1333 AvmCore* core = info->pool()->core;
1334 MethodSigArgDescIter calleeTypeIter(info);
1336 return argDescSize(calleeTypeIter, core);
1339 // calls "env" with supplied variadic arguments described by the "immediate" flavor of argument
1340 // description in argDesc
1341 // returns an int32_t value
1342 Atom coerce32CdeclArgDescEnter(Traits* retTraits, uintptr_t argDesc, MethodEnv* env, va_list ap)
1344 MethodInfo* info = env->method;
1345 Atom result = coerce32CdeclShim(
1346 (void*)info->handler_function(), argDescSize(info),
1347 immArgDescCoercer, env, retTraits, argDesc, &ap);
1348 return result;
1351 // calls "env" with supplied variadic arguments described by the "pointer" flavor of argument
1352 // description in argDesc
1353 // returns an int32_t value
1354 Atom coerce32CdeclArgDescEnter(Traits* retTraits, char* argDesc, MethodEnv* env, va_list ap)
1356 MethodInfo* info = env->method;
1357 Atom result = coerce32CdeclShim(
1358 (void*)info->handler_function(), argDescSize(info), ptrArgDescCoercer, env, retTraits, (uintptr_t)argDesc, &ap);
1359 return result;
1362 Atom coerce32CdeclArgDescEnter(Traits* retTraits, MethodEnv* env, int argc, Atom* argv)
1364 MethodInfo* info = env->method;
1365 Atom result = coerce32CdeclShim(
1366 (void*)info->handler_function(), argDescSize(info), atomvArgDescCoercer, env, retTraits, (uintptr_t)argc, (void*)argv);
1367 return result;
1370 Atom coerce32CdeclArgDescEnter(Traits* retTraits, MethodEnv* env, int argc, uint32_t* ap)
1372 MethodInfo* info = env->method;
1373 Atom result = coerce32CdeclShim(
1374 (void*)info->handler_function(), argDescSize(info), apArgDescCoercer, env, retTraits, (uintptr_t)argc, (void*)ap);
1375 return result;
1378 // calls "env" with supplied variadic arguments described by the "immediate" flavor of argument
1379 // description in argDesc
1380 // returns a double value
1381 double coerceNCdeclArgDescEnter(uintptr_t argDesc, MethodEnv* env, va_list ap)
1383 MethodInfo* info = env->method;
1384 AvmCore* core = env->core();
1386 double result = coerceNCdeclShim(
1387 (void*)info->handler_function(), argDescSize(info),
1388 immArgDescCoercer, env, NUMBER_TYPE, argDesc, &ap);
1389 return result;
1392 // calls "env" with supplied variadic arguments described by the "pointer" flavor of argument
1393 // description in argDesc
1394 // returns a double value
1395 double coerceNCdeclArgDescEnter(char* argDesc, MethodEnv* env, va_list ap)
1397 MethodInfo* info = env->method;
1398 AvmCore* core = env->core();
1400 double result = coerceNCdeclShim(
1401 (void*)info->handler_function(), argDescSize(info), ptrArgDescCoercer, env, NUMBER_TYPE, (uintptr_t)argDesc, &ap);
1402 return result;
1405 double coerceNCdeclArgDescEnter(MethodEnv* env, int argc, Atom* argv)
1407 MethodInfo* info = env->method;
1408 AvmCore* core = env->core();
1410 double result = coerceNCdeclShim(
1411 (void*)info->handler_function(), argDescSize(info), atomvArgDescCoercer, env, NUMBER_TYPE, (uintptr_t)argc, (void*)argv);
1412 return result;
1415 double coerceNCdeclArgDescEnter(MethodEnv* env, int argc, uint32_t* ap)
1417 MethodInfo* info = env->method;
1418 AvmCore* core = env->core();
1419 double result = coerceNCdeclShim(
1420 (void*)info->handler_function(), argDescSize(info), apArgDescCoercer, env, NUMBER_TYPE, (uintptr_t)argc , (void*)ap);
1421 return result;
1424 // calculate size needed for ap style argument block
1425 int32_t argDescApSize(uintptr_t argDesc, MethodEnv* env)
1427 APArgDescIter calleeTypeIter(-1, env->method);
1428 ImmArgDescIter callerTypeIter(argDesc, env->core());
1429 return apArgDescSize(callerTypeIter, calleeTypeIter, env->core());
1432 int32_t argDescApSize(char* argDesc, MethodEnv* env)
1434 APArgDescIter calleeTypeIter(-1, env->method);
1435 PtrArgDescIter callerTypeIter(argDesc, env->core());
1436 return apArgDescSize(callerTypeIter, calleeTypeIter, env->core());
1439 // convert arguments to ap style argument block, returning "argc"
1440 int32_t argDescArgsToAp(void* calleeArgDescBuf, uintptr_t argDesc, MethodEnv* env, va_list ap)
1442 APArgDescIter calleeTypeIter(-1, env->method);
1443 ImmArgDescIter callerTypeIter(argDesc, env->core());
1444 APType dst = (APType)calleeArgDescBuf;
1445 return argCoerceLoop(env, callerTypeIter, ap, calleeTypeIter, dst) - 1;
1448 int32_t argDescArgsToAp(void* calleeArgDescBuf, char* argDesc, MethodEnv* env, va_list ap)
1450 APArgDescIter calleeTypeIter(-1, env->method);
1451 PtrArgDescIter callerTypeIter(argDesc, env->core());
1452 APType dst = (APType)calleeArgDescBuf;
1453 return argCoerceLoop(env, callerTypeIter, ap, calleeTypeIter, dst) - 1;
1456 // count arguments... no size calculations
1457 template <class ARG_TYPE_ITER> static int32_t argDescArgCount(ARG_TYPE_ITER iter)
1459 int32_t result = 0;
1461 while (iter.nextTypeKind() != kVOID)
1462 result++;
1463 return result;
1466 // return number of arguments in description
1467 int32_t argDescArgCount(uintptr_t argDesc)
1469 ImmArgDescIter iter(argDesc, NULL);
1470 return argDescArgCount(iter);
1473 int32_t argDescArgCount(char* argDesc)
1475 PtrArgDescIter iter(argDesc, NULL);
1476 return argDescArgCount(iter);
1479 // convert arguments to Atoms
1480 void argDescArgsToAtomv(Atom* args, uintptr_t argDesc, MethodEnv* env, va_list ap)
1482 AvmCore* core = env->core();
1483 ImmArgDescIter callerTypeIter(argDesc, core);
1484 AtomvArgDescIter calleeTypeIter(core);
1485 APType dst = (APType)args;
1486 argCoerceLoop(env, callerTypeIter, ap, calleeTypeIter, dst);
1489 void argDescArgsToAtomv(Atom* args, char* argDesc, MethodEnv* env, va_list ap)
1491 AvmCore* core = env->core();
1492 PtrArgDescIter callerTypeIter(argDesc, core);
1493 AtomvArgDescIter calleeTypeIter(core);
1494 APType dst = (APType)args;
1495 argCoerceLoop(env, callerTypeIter, ap, calleeTypeIter, dst);
1498 // convert arguments to AtomList
1499 void argDescArgsToAtomList(AtomList& dst, uintptr_t argDesc, MethodEnv* env, va_list ap)
1501 AvmCore* core = env->core();
1502 ImmArgDescIter callerTypeIter(argDesc, core);
1503 AtomvArgDescIter calleeTypeIter(core);
1504 argCoerceLoop(env, callerTypeIter, ap, calleeTypeIter, dst);
1507 void argDescArgsToAtomList(AtomList& dst, char* argDesc, MethodEnv* env, va_list ap)
1509 AvmCore* core = env->core();
1510 PtrArgDescIter callerTypeIter(argDesc, core);
1511 AtomvArgDescIter calleeTypeIter(core);
1512 argCoerceLoop(env, callerTypeIter, ap, calleeTypeIter, dst);
1515 #ifdef VMCFG_AOT
1516 uintptr_t aotThunker(MethodEnv* env, int32_t argc, uint32_t* argv)
1518 Traits* rt = env->method->getMethodSignature()->returnTraits();
1519 return coerce32CdeclArgDescEnter(rt, env, argc, (uint32_t* )argv);
1522 double aotThunkerN(MethodEnv* env, int32_t argc, uint32_t* argv)
1524 return coerceNCdeclArgDescEnter(env, argc, (uint32_t* )argv);
1526 #endif // VMCFG_AOT
1529 #endif // VMCFG_CDECL