1 /* ***** BEGIN LICENSE BLOCK *****
2 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
4 * The contents of this file are subject to the Mozilla Public License Version 1.1 (the
5 * "License"); you may not use this file except in compliance with the License. You may obtain
6 * a copy of the License at http://www.mozilla.org/MPL/
8 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
9 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
10 * language governing rights and limitations under the License.
12 * The Original Code is [Open Source Virtual Machine.]
14 * The Initial Developer of the Original Code is Adobe Systems Incorporated. Portions created
15 * by the Initial Developer are Copyright (C)[ 2004-2006 ] Adobe Systems Incorporated. All Rights
18 * Contributor(s): Adobe AS3 Team
19 * Andreas Gal <gal@mozilla.com>
20 * Asko Tontti <atontti@cc.hut.fi>
22 * Alternatively, the contents of this file may be used under the terms of either the GNU
23 * General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public
24 * License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the
25 * LGPL are applicable instead of those above. If you wish to allow use of your version of this
26 * file only under the terms of either the GPL or the LGPL, and not to allow others to use your
27 * version of this file under the terms of the MPL, indicate your decision by deleting provisions
28 * above and replace them with the notice and other provisions required by the GPL or the
29 * LGPL. If you do not delete the provisions above, a recipient may use your version of this file
30 * under the terms of any one of the MPL, the GPL or the LGPL.
32 ***** END LICENSE BLOCK ***** */
42 #if defined(AVMPLUS_UNIX)
49 #define FASTCALL JS_FASTCALL
51 #if defined(JS_NO_FASTCALL)
52 #define NJ_NO_FASTCALL
53 #if defined(AVMPLUS_IA32)
54 #define SIMULATE_FASTCALL(lr, state_ptr, frag_ptr, func_addr) \
58 : "c" (state_ptr), "d" (frag_ptr), "S" (func_addr) \
61 #endif /* defined(AVMPLUS_IA32) */
62 #endif /* defined(JS_NO_FASTCALL) */
68 #if defined(DEBUG) || defined(_MSC_VER) && _MSC_VER < 1400
78 void NanoAssertFail();
81 #define AvmAssert(x) assert(x)
82 #define AvmAssertMsg(x, y)
83 #define AvmDebugLog(x) printf x
87 * Can we just take a moment to think about what it means that MSVC doesn't have stdint.h in 2008?
88 * Thanks for your time.
90 typedef JSUint8
uint8_t;
91 typedef JSInt8
int8_t;
92 typedef JSUint16
uint16_t;
93 typedef JSInt16
int16_t;
94 typedef JSUint32
uint32_t;
95 typedef JSInt32
int32_t;
96 typedef JSUint64
uint64_t;
97 typedef JSInt64
int64_t;
102 #if defined(AVMPLUS_IA32)
103 #if defined(_MSC_VER)
104 __declspec(naked
) static inline __int64
rdtsc()
112 #elif defined(SOLARIS)
113 static inline unsigned long long rdtsc(void)
115 unsigned long long int x
;
116 asm volatile (".byte 0x0f, 0x31" : "=A" (x
));
119 #elif defined(__i386__)
120 static __inline__
unsigned long long rdtsc(void)
122 unsigned long long int x
;
123 __asm__
volatile (".byte 0x0f, 0x31" : "=A" (x
));
126 #endif /* compilers */
128 #elif defined(__x86_64__)
130 static __inline__
uint64_t rdtsc(void)
133 __asm__
__volatile__ ("rdtsc" : "=a"(lo
), "=d"(hi
));
134 return ( (uint64_t)lo
)|( ((uint64_t)hi
)<<32 );
137 #elif defined(__powerpc__)
139 typedef unsigned long long int unsigned long long;
141 static __inline__
unsigned long long rdtsc(void)
143 unsigned long long int result
=0;
144 unsigned long int upper
, lower
,tmp
;
152 : "=r"(upper
),"=r"(lower
),"=r"(tmp
)
156 result
= result
|lower
;
161 #endif /* architecture */
173 operator new(size_t size
, GC
* gc
)
175 return calloc(1, size
);
178 static void operator delete (void *gcObject
)
184 #define MMGC_SUBCLASS_DECL : public avmplus::GCObject
186 class GCFinalizedObject
: public GCObject
189 static void operator delete (void *gcObject
)
198 int32_t kNativePageSize
;
202 #if defined _SC_PAGE_SIZE
203 kNativePageSize
= sysconf(_SC_PAGE_SIZE
);
205 kNativePageSize
= 4096; // @todo: what is this?
210 Alloc(uint32_t pages
)
213 return VirtualAlloc(NULL
,
214 pages
* kNativePageSize
,
215 MEM_COMMIT
| MEM_RESERVE
,
216 PAGE_EXECUTE_READWRITE
);
217 #elif defined AVMPLUS_UNIX
219 * Don't use normal heap with mprotect+PROT_EXEC for executable code.
220 * SELinux and friends don't allow this.
223 pages
* kNativePageSize
,
224 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
225 MAP_PRIVATE
| MAP_ANON
,
229 return valloc(pages
* kNativePageSize
);
234 Free(void* p
, uint32_t pages
)
237 VirtualFree(p
, 0, MEM_RELEASE
);
238 #elif defined AVMPLUS_UNIX
240 munmap((char*)p
, pages
* kNativePageSize
);
242 munmap(p
, pages
* kNativePageSize
);
257 * flags to be passed as second argument to alloc
268 Alloc(uint32_t bytes
, int flags
=kZero
)
271 return calloc(1, bytes
);
273 return malloc(bytes
);
282 static inline GCHeap
*
291 #define WB(gc, container, addr, value) do { *(addr) = (value); } while(0)
292 #define WBRC(gc, container, addr, value) do { *(addr) = (value); } while(0)
294 #define MMGC_MEM_TYPE(x)
296 typedef int FunctionID
;
302 typedef class String AvmString
;
304 class StringNullTerminatedUTF8
309 StringNullTerminatedUTF8(GC
* gc
, String
* s
)
311 cstr
= strdup((const char*)s
);
314 ~StringNullTerminatedUTF8()
326 typedef String
* Stringp
;
332 memset(this, 0, sizeof(Config
));
334 verbose
= getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose");
340 #if defined (AVMPLUS_AMD64)
348 uint32_t quiet_opt
:1;
350 uint32_t verbose_addrs
:1;
351 uint32_t verbose_live
:1;
352 uint32_t verbose_exits
:1;
353 uint32_t show_stats
:1;
355 #if defined (AVMPLUS_IA32) || defined(AVMPLUS_AMD64)
361 static const int kstrconst_emptyString
= 0;
367 const char* format(const void* ip
)
370 sprintf(buf
, "%p", ip
);
389 AvmConsole
& operator<<(const char* s
)
391 fprintf(stdout
, "%s", s
);
399 AvmInterpreter interp
;
402 static Config config
;
404 static String
* k_str
[];
406 #if defined (AVMPLUS_IA32) || defined(AVMPLUS_AMD64)
416 return config
.use_cmov
;
423 return config
.quiet_opt
;
429 return config
.verbose
;
438 static inline String
* newString(const char* cstr
) {
439 return (String
*)strdup(cstr
);
442 static inline void freeString(String
* str
) {
443 return free((char*)str
);
457 * The List<T> template implements a simple List, which can
458 * be templated to support different types.
460 * Elements can be added to the end, modified in the middle,
461 * but no holes are allowed. That is for set(n, v) to work
464 * Note that [] operators are provided and you can violate the
465 * set properties using these operators, if you want a real
466 * list don't use the [] operators, if you want a general purpose
467 * array use the [] operators.
470 enum ListElementType
{
471 LIST_NonGCObjects
= 0,
476 template <typename T
, ListElementType kElementType
>
480 enum { kInitialCapacity
= 128 };
482 List(GC
*_gc
, uint32_t _capacity
=kInitialCapacity
) : data(NULL
), len(0), capacity(0)
484 ensureCapacity(_capacity
);
491 // zero out in case we are part of an RCObject
495 inline void destroy()
501 const T
*getData() const { return data
; }
503 // 'this' steals the guts of 'that' and 'that' gets reset.
504 void FASTCALL
become(List
& that
)
508 this->data
= that
.data
;
509 this->len
= that
.len
;
510 this->capacity
= that
.capacity
;
516 uint32_t FASTCALL
add(T value
)
518 if (len
>= capacity
) {
525 inline bool isEmpty() const
530 inline uint32_t size() const
535 inline T
get(uint32_t index
) const
537 AvmAssert(index
< len
);
538 return *(T
*)(data
+ index
);
541 void FASTCALL
set(uint32_t index
, T value
)
543 AvmAssert(index
< capacity
);
548 AvmAssert(len
<= capacity
);
552 void add(const List
<T
, kElementType
>& l
)
554 ensureCapacity(len
+l
.size());
555 // FIXME: make RCObject version
556 AvmAssert(kElementType
!= LIST_RCObjects
);
557 arraycopy(l
.getData(), 0, data
, len
, l
.size());
567 int FASTCALL
indexOf(T value
) const
569 for(uint32_t i
=0; i
<len
; i
++)
575 int FASTCALL
lastIndexOf(T value
) const
577 for(int32_t i
=len
-1; i
>=0; i
--)
583 inline T
last() const
588 T FASTCALL
removeLast()
591 return undef_list_val();
593 set(len
-1, undef_list_val());
598 inline T
operator[](uint32_t index
) const
600 AvmAssert(index
< capacity
);
604 void FASTCALL
ensureCapacity(uint32_t cap
)
606 if (cap
> capacity
) {
608 data
= (T
*)calloc(1, factor(cap
));
610 data
= (T
*)realloc(data
, factor(cap
));
611 zero_range(capacity
, cap
- capacity
);
617 void FASTCALL
insert(uint32_t index
, T value
, uint32_t count
= 1)
619 AvmAssert(index
<= len
);
620 AvmAssert(count
> 0);
621 ensureCapacity(len
+count
);
622 memmove(data
+ index
+ count
, data
+ index
, factor(len
- index
));
623 wbzm(index
, index
+count
, value
);
627 T FASTCALL
removeAt(uint32_t index
)
630 // dec the refcount on the one we're removing
631 wb(index
, undef_list_val());
632 memmove(data
+ index
, data
+ index
+ 1, factor(len
- index
- 1));
640 // growth is fast at first, then slows at larger list sizes.
642 const uint32_t curMax
= capacity
;
644 newMax
= kInitialCapacity
;
646 newMax
= curMax
* 3/2;
650 ensureCapacity(newMax
);
653 void arraycopy(const T
* src
, int srcStart
, T
* dst
, int dstStart
, int nbr
)
655 // we have 2 cases, either closing a gap or opening it.
656 if ((src
== dst
) && (srcStart
> dstStart
) )
658 for(int i
=0; i
<nbr
; i
++)
659 dst
[i
+dstStart
] = src
[i
+srcStart
];
663 for(int i
=nbr
-1; i
>=0; i
--)
664 dst
[i
+dstStart
] = src
[i
+srcStart
];
668 inline void do_wb_nongc(T
* slot
, T value
)
673 inline void do_wb_gc(GCObject
** slot
, const GCObject
** value
)
675 *slot
= (GCObject
*)*value
;
678 void FASTCALL
wb(uint32_t index
, T value
)
680 AvmAssert(index
< capacity
);
681 AvmAssert(data
!= NULL
);
682 T
* slot
= &data
[index
];
683 do_wb_nongc(slot
, value
);
686 // multiple wb call with the same value, and assumption that existing value is all zero bits,
688 // for (uint32_t u = index; u < index_end; ++u)
690 void FASTCALL
wbzm(uint32_t index
, uint32_t index_end
, T value
)
692 AvmAssert(index
< capacity
);
693 AvmAssert(index_end
<= capacity
);
694 AvmAssert(index
< index_end
);
695 AvmAssert(data
!= NULL
);
696 T
* slot
= data
+ index
;
697 for ( ; index
< index_end
; ++index
, ++slot
)
698 do_wb_nongc(slot
, value
);
701 inline uint32_t factor(uint32_t index
) const
703 return index
* sizeof(T
);
706 void FASTCALL
zero_range(uint32_t _first
, uint32_t _count
)
708 memset(data
+ _first
, 0, factor(_count
));
711 // stuff that needs specialization based on the type
712 static inline T
undef_list_val();
715 List(const List
& toCopy
); // unimplemented
716 void operator=(const List
& that
); // unimplemented
718 // ------------------------ DATA SECTION BEGIN
723 // ------------------------ DATA SECTION END
727 // stuff that needs specialization based on the type
728 template<typename T
, ListElementType kElementType
>
729 /* static */ inline T List
<T
, kElementType
>::undef_list_val() { return T(0); }
732 * The SortedMap<K,T> template implements an object that
733 * maps keys to values. The keys are sorted
734 * from smallest to largest in the map. Time of operations
736 * put() is O(1) if the key is higher than any existing
737 * key; O(logN) if the key already exists,
738 * and O(N) otherwise.
739 * get() is an O(logN) binary search.
741 * no duplicates are allowed.
743 template <class K
, class T
, ListElementType valType
>
744 class SortedMap
: public GCObject
747 enum { kInitialCapacity
= 64 };
749 SortedMap(GC
* gc
, int _capacity
=kInitialCapacity
)
750 : keys(gc
, _capacity
), values(gc
, _capacity
)
756 return keys
.size() == 0;
778 if (keys
.size() == 0 || k
> keys
.last())
796 i
= -i
- 1; // recover the insertion point
797 AvmAssert(keys
.size() != (uint32_t)i
);
808 return i
>= 0 ? values
[i
] : 0;
811 bool get(K k
, T
& v
) const
822 bool containsKey(K k
) const
825 return (i
>= 0) ? true : false;
836 T old
= values
.removeAt(i
);
841 T
removeFirst() { return isEmpty() ? (T
)0 : removeAt(0); }
842 T
removeLast() { return isEmpty() ? (T
)0 : removeAt(keys
.size()-1); }
843 T
first() const { return isEmpty() ? (T
)0 : values
[0]; }
844 T
last() const { return isEmpty() ? (T
)0 : values
[keys
.size()-1]; }
846 K
firstKey() const { return isEmpty() ? 0 : keys
[0]; }
847 K
lastKey() const { return isEmpty() ? 0 : keys
[keys
.size()-1]; }
850 T
at(int i
) const { return values
[i
]; }
851 K
keyAt(int i
) const { return keys
[i
]; }
853 int findNear(K k
) const {
855 return i
>= 0 ? i
: -i
-2;
858 List
<K
, LIST_NonGCObjects
> keys
;
859 List
<T
, valType
> values
;
864 int hi
= keys
.size()-1;
875 return i
; // key found
877 return -(lo
+ 1); // key not found, low is the insertion point
881 #define GCSortedMap SortedMap
884 * Bit vectors are an efficient method of keeping True/False information
885 * on a set of items or conditions. Class BitSet provides functions
886 * to manipulate individual bits in the vector.
888 * Since most vectors are rather small an array of longs is used by
889 * default to house the value of the bits. If more bits are needed
890 * then an array is allocated dynamically outside of this object.
892 * This object is not optimized for a fixed sized bit vector
893 * it instead allows for dynamically growing the bit vector.
898 enum { kUnit
= 8*sizeof(long),
899 kDefaultCapacity
= 4 };
903 capacity
= kDefaultCapacity
;
909 if (capacity
> kDefaultCapacity
)
915 if (capacity
> kDefaultCapacity
)
916 for(int i
=0; i
<capacity
; i
++)
919 for(int i
=0; i
<capacity
; i
++)
923 void set(GC
*gc
, int bitNbr
)
925 int index
= bitNbr
/ kUnit
;
926 int bit
= bitNbr
% kUnit
;
927 if (index
>= capacity
)
930 if (capacity
> kDefaultCapacity
)
931 bits
.ptr
[index
] |= (1<<bit
);
933 bits
.ar
[index
] |= (1<<bit
);
936 void clear(int bitNbr
)
938 int index
= bitNbr
/ kUnit
;
939 int bit
= bitNbr
% kUnit
;
940 if (index
< capacity
)
942 if (capacity
> kDefaultCapacity
)
943 bits
.ptr
[index
] &= ~(1<<bit
);
945 bits
.ar
[index
] &= ~(1<<bit
);
949 bool get(int bitNbr
) const
951 int index
= bitNbr
/ kUnit
;
952 int bit
= bitNbr
% kUnit
;
954 if (index
< capacity
)
956 if (capacity
> kDefaultCapacity
)
957 value
= ( bits
.ptr
[index
] & (1<<bit
) ) ? true : false;
959 value
= ( bits
.ar
[index
] & (1<<bit
) ) ? true : false;
965 // Grow the array until at least newCapacity big
966 void grow(GC
*gc
, int newCapacity
)
968 // create vector that is 2x bigger than requested
970 //MEMTAG("BitVector::Grow - long[]");
971 long* newBits
= (long*)calloc(1, newCapacity
* sizeof(long));
972 //memset(newBits, 0, newCapacity * sizeof(long));
975 if (capacity
> kDefaultCapacity
)
976 for(int i
=0; i
<capacity
; i
++)
977 newBits
[i
] = bits
.ptr
[i
];
979 for(int i
=0; i
<capacity
; i
++)
980 newBits
[i
] = bits
.ar
[i
];
982 // in with the new out with the old
983 if (capacity
> kDefaultCapacity
)
987 capacity
= newCapacity
;
990 // by default we use the array, but if the vector
991 // size grows beyond kDefaultCapacity we allocate
992 // space dynamically.
996 long ar
[kDefaultCapacity
];