1 /* ***** BEGIN LICENSE BLOCK *****
2 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
4 * The contents of this file are subject to the Mozilla Public License Version 1.1 (the
5 * "License"); you may not use this file except in compliance with the License. You may obtain
6 * a copy of the License at http://www.mozilla.org/MPL/
8 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
9 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
10 * language governing rights and limitations under the License.
12 * The Original Code is [Open Source Virtual Machine.]
14 * The Initial Developer of the Original Code is Adobe System Incorporated. Portions created
15 * by the Initial Developer are Copyright (C)[ 2004-2006 ] Adobe Systems Incorporated. All Rights
18 * Contributor(s): Adobe AS3 Team
19 * Andreas Gal <gal@mozilla.com>
20 * Asko Tontti <atontti@cc.hut.fi>
22 * Alternatively, the contents of this file may be used under the terms of either the GNU
23 * General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public
24 * License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the
25 * LGPL are applicable instead of those above. If you wish to allow use of your version of this
26 * file only under the terms of either the GPL or the LGPL, and not to allow others to use your
27 * version of this file under the terms of the MPL, indicate your decision by deleting provisions
28 * above and replace them with the notice and other provisions required by the GPL or the
29 * LGPL. If you do not delete the provisions above, a recipient may use your version of this file
30 * under the terms of any one of the MPL, the GPL or the LGPL.
32 ***** END LICENSE BLOCK ***** */
/* NOTE(review): this region is extraction residue -- interior lines are
 * missing, so the #if below is unterminated in this view; presumably it
 * wrapped a <stdint.h> include on the POSIX platforms -- TODO confirm
 * against the original header. */
42 #if defined(AVMPLUS_LINUX) || defined(DARWIN) || defined(__FreeBSD__)
/* FASTCALL maps onto SpiderMonkey's JS_FASTCALL calling-convention macro. */
49 #define FASTCALL JS_FASTCALL
/* Out-of-line assertion-failure hook used by the NanoAssert machinery. */
65 void NanoAssertFail();
/* avmplus assertion shims: AvmAssert forwards to the C assert(); AvmAssertMsg
 * is compiled out entirely; AvmDebugLog pastes its parenthesized argument
 * list directly after printf, so callers write AvmDebugLog(("fmt", args)). */
68 #define AvmAssert(x) assert(x)
69 #define AvmAssertMsg(x, y)
70 #define AvmDebugLog(x) printf x
/* The two lines below are the tail of a comment whose opening lines were
 * lost; it introduces the MSVC stdint typedefs that follow. */
74 * Can we just take a moment to think about what it means that MSVC doesn't have stdint.h in 2008?
75 * Thanks for your time.
77 typedef JSUint8
uint8_t;
78 typedef JSInt8
int8_t;
79 typedef JSUint16
uint16_t;
80 typedef JSInt16
int16_t;
81 typedef JSUint32
uint32_t;
82 typedef JSInt32
int32_t;
83 typedef JSUint64
uint64_t;
84 typedef JSInt64
int64_t;
#if defined(_MSC_VER) && defined(AVMPLUS_IA32)

/*
 * Read the CPU time-stamp counter on MSVC/x86.  The function is "naked":
 * the RDTSC instruction leaves its 64-bit result in EDX:EAX, which is
 * exactly MSVC's return convention for __int64, so the body is just the
 * instruction followed by a bare ret.
 *
 * NOTE(review): the body lines were lost in extraction; this is the
 * canonical 2-instruction form -- confirm against the original header.
 */
__declspec(naked) static inline __int64
rdtsc()
{
    __asm
    {
        rdtsc;
        ret;
    }
}
#endif
100 #if defined(__i386__)
102 static __inline__
unsigned long long rdtsc(void)
104 unsigned long long int x
;
105 __asm__
volatile (".byte 0x0f, 0x31" : "=A" (x
));
108 #elif defined(__x86_64__)
110 static __inline__
uint64_t rdtsc(void)
113 __asm__
__volatile__ ("rdtsc" : "=a"(lo
), "=d"(hi
));
114 return ( (uint64_t)lo
)|( ((uint64_t)hi
)<<32 );
117 #elif defined(__powerpc__)
119 typedef unsigned long long int unsigned long long;
121 static __inline__
unsigned long long rdtsc(void)
123 unsigned long long int result
=0;
124 unsigned long int upper
, lower
,tmp
;
132 : "=r"(upper
),"=r"(lower
),"=r"(tmp
)
136 result
= result
|lower
;
166 uint32 numGlobalSlots
;
167 uint32 numStackSlots
;
168 uint32 numStackSlotsBelowCurrentFrame
;
171 #if defined NJ_VERBOSE
185 GuardRecord
*outgoing
;
189 #if defined NJ_VERBOSE
/*
 * Allocation sizes for guard records and side exits: fixed in this build
 * (no variable-length payload), so both reduce to plain sizeof.
 */
#define GuardRecordSize(g) sizeof(GuardRecord)
#define SideExitSize(e) sizeof(SideExit)
202 static void operator delete (void *gcObject
)
208 #define MMGC_SUBCLASS_DECL : public GCObject
210 class GCFinalizedObject
: public GCObject
213 static void operator delete (void *gcObject
)
222 int32_t kNativePageSize
;
226 #if defined _SC_PAGE_SIZE
227 kNativePageSize
= sysconf(_SC_PAGE_SIZE
);
229 kNativePageSize
= 4096; // @todo: what is this?
234 Alloc(uint32_t pages
)
237 return VirtualAlloc(NULL
,
238 pages
* kNativePageSize
,
239 MEM_COMMIT
| MEM_RESERVE
,
240 PAGE_EXECUTE_READWRITE
);
241 #elif defined AVMPLUS_LINUX || defined DARWIN
243 * Don't use normal heap with mprotect+PROT_EXEC for executable code.
244 * SELinux and friends don't allow this.
247 pages
* kNativePageSize
,
248 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
249 MAP_PRIVATE
| MAP_ANON
,
253 return valloc(pages
* kNativePageSize
);
258 Free(void* p
, uint32_t pages
)
261 VirtualFree(p
, 0, MEM_RELEASE
);
262 #elif defined AVMPLUS_LINUX || defined DARWIN
263 munmap(p
, pages
* kNativePageSize
);
277 Alloc(uint32_t bytes
)
279 return calloc(1, bytes
);
288 static inline GCHeap
*
296 operator new(size_t size
, GC
* gc
)
298 return calloc(1, size
);
302 operator delete(void* p
)
/* MMgc memory-tagging hook: compiled out to nothing in this shim. */
#define MMGC_MEM_TYPE(x)

/* Plain integer identifier for a function; no richer type is needed here. */
typedef int FunctionID;
318 void* sp
; /* native stack pointer, stack[0] is spbase[0] */
319 void* rp
; /* call stack pointer */
320 void* gp
; /* global frame pointer */
321 JSContext
*cx
; /* current VM context handle */
322 void* eos
; /* first unusable word after the native stack */
323 void* eor
; /* first unusable word after the call stack */
324 nanojit::GuardRecord
* nestedExit
; /* innermost nested guard for NESTED_EXIT exits */
331 typedef class String AvmString
;
333 class StringNullTerminatedUTF8
338 StringNullTerminatedUTF8(GC
* gc
, String
* s
)
340 cstr
= strdup((const char*)s
);
343 ~StringNullTerminatedUTF8()
355 typedef String
* Stringp
;
357 class AvmConfiguration
361 memset(this, 0, sizeof(AvmConfiguration
));
363 verbose
= getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose");
373 uint32_t quiet_opt
:1;
375 uint32_t verbose_addrs
:1;
376 uint32_t verbose_live
:1;
377 uint32_t verbose_exits
:1;
378 uint32_t show_stats
:1;
381 static const int kstrconst_emptyString
= 0;
387 const char* format(const void* ip
)
390 sprintf(buf
, "%p", ip
);
409 AvmConsole
& operator<<(const char* s
)
411 fprintf(stdout
, "%s", s
);
419 AvmInterpreter interp
;
422 static AvmConfiguration config
;
424 static String
* k_str
[];
425 static bool sse2_available
;
430 return sse2_available
;
436 return config
.quiet_opt
;
442 return config
.verbose
;
451 static inline String
* newString(const char* cstr
) {
452 return (String
*)strdup(cstr
);
466 * The List<T> template implements a simple List, which can
467 * be templated to support different types.
469 * Elements can be added to the end, modified in the middle,
470 * but no holes are allowed. That is for set(n, v) to work
473 * Note that [] operators are provided and you can violate the
474 * set properties using these operators, if you want a real
475 * list dont use the [] operators, if you want a general purpose
476 * array use the [] operators.
479 enum ListElementType
{ LIST_NonGCObjects
, LIST_GCObjects
};
481 template <typename T
, ListElementType kElementType
>
485 enum { kInitialCapacity
= 128 };
487 List(GC
*_gc
, uint32_t _capacity
=kInitialCapacity
) : data(NULL
), len(0), capacity(0)
489 ensureCapacity(_capacity
);
496 // zero out in case we are part of an RCObject
500 inline void destroy()
506 // 'this' steals the guts of 'that' and 'that' gets reset.
507 void FASTCALL
become(List
& that
)
511 this->data
= that
.data
;
512 this->len
= that
.len
;
513 this->capacity
= that
.capacity
;
519 uint32_t FASTCALL
add(T value
)
521 if (len
>= capacity
) {
528 inline bool isEmpty() const
533 inline uint32_t size() const
538 inline T
get(uint32_t index
) const
540 AvmAssert(index
< len
);
541 return *(T
*)(data
+ index
);
544 void FASTCALL
set(uint32_t index
, T value
)
546 AvmAssert(index
< capacity
);
551 AvmAssert(len
<= capacity
);
561 int FASTCALL
indexOf(T value
) const
563 for(uint32_t i
=0; i
<len
; i
++)
569 int FASTCALL
lastIndexOf(T value
) const
571 for(int32_t i
=len
-1; i
>=0; i
--)
582 T FASTCALL
removeLast()
585 return undef_list_val();
587 set(len
-1, undef_list_val());
592 inline T
operator[](uint32_t index
) const
594 AvmAssert(index
< capacity
);
598 void FASTCALL
ensureCapacity(uint32_t cap
)
600 if (cap
> capacity
) {
602 data
= (T
*)calloc(1, factor(cap
));
604 data
= (T
*)realloc(data
, factor(cap
));
605 zero_range(capacity
, cap
- capacity
);
611 void FASTCALL
insert(uint32_t index
, T value
, uint32_t count
= 1)
613 AvmAssert(index
<= len
);
614 AvmAssert(count
> 0);
615 ensureCapacity(len
+count
);
616 memmove(data
+ index
+ count
, data
+ index
, factor(len
- index
));
617 wbzm(index
, index
+count
, value
);
621 T FASTCALL
removeAt(uint32_t index
)
624 // dec the refcount on the one we're removing
625 wb(index
, undef_list_val());
626 memmove(data
+ index
, data
+ index
+ 1, factor(len
- index
- 1));
634 // growth is fast at first, then slows at larger list sizes.
636 const uint32_t curMax
= capacity
;
638 newMax
= kInitialCapacity
;
640 newMax
= curMax
* 3/2;
644 ensureCapacity(newMax
);
647 inline void do_wb_nongc(T
* slot
, T value
)
652 inline void do_wb_gc(GCObject
** slot
, const GCObject
** value
)
654 *slot
= (GCObject
*)*value
;
657 void FASTCALL
wb(uint32_t index
, T value
)
659 AvmAssert(index
< capacity
);
660 AvmAssert(data
!= NULL
);
661 T
* slot
= &data
[index
];
662 do_wb_nongc(slot
, value
);
665 // multiple wb call with the same value, and assumption that existing value is all zero bits,
667 // for (uint32_t u = index; u < index_end; ++u)
669 void FASTCALL
wbzm(uint32_t index
, uint32_t index_end
, T value
)
671 AvmAssert(index
< capacity
);
672 AvmAssert(index_end
<= capacity
);
673 AvmAssert(index
< index_end
);
674 AvmAssert(data
!= NULL
);
675 T
* slot
= data
+ index
;
676 for ( ; index
< index_end
; ++index
, ++slot
)
677 do_wb_nongc(slot
, value
);
680 inline uint32_t factor(uint32_t index
) const
682 return index
* sizeof(T
);
685 void FASTCALL
zero_range(uint32_t _first
, uint32_t _count
)
687 memset(data
+ _first
, 0, factor(_count
));
690 // stuff that needs specialization based on the type
691 static inline T
undef_list_val();
694 List(const List
& toCopy
); // unimplemented
695 void operator=(const List
& that
); // unimplemented
697 // ------------------------ DATA SECTION BEGIN
702 // ------------------------ DATA SECTION END
706 // stuff that needs specialization based on the type
707 template<typename T
, ListElementType kElementType
>
708 /* static */ inline T List
<T
, kElementType
>::undef_list_val() { return T(0); }
711 * The SortedMap<K,T> template implements an object that
712 * maps keys to values. The keys are sorted
713 * from smallest to largest in the map. Time of operations
715 * put() is O(1) if the key is higher than any existing
716 * key; O(logN) if the key already exists,
717 * and O(N) otherwise.
718 * get() is an O(logN) binary search.
720 * no duplicates are allowed.
722 template <class K
, class T
, ListElementType valType
>
726 enum { kInitialCapacity
= 64 };
728 SortedMap(GC
* gc
, int _capacity
=kInitialCapacity
)
729 : keys(gc
, _capacity
), values(gc
, _capacity
)
735 return keys
.size() == 0;
757 if (keys
.size() == 0 || k
> keys
.last())
775 i
= -i
- 1; // recover the insertion point
776 AvmAssert(keys
.size() != (uint32_t)i
);
787 return i
>= 0 ? values
[i
] : 0;
790 bool get(K k
, T
& v
) const
801 bool containsKey(K k
) const
804 return (i
>= 0) ? true : false;
815 T old
= values
.removeAt(i
);
820 T
removeFirst() { return isEmpty() ? (T
)0 : removeAt(0); }
821 T
removeLast() { return isEmpty() ? (T
)0 : removeAt(keys
.size()-1); }
822 T
first() const { return isEmpty() ? (T
)0 : values
[0]; }
823 T
last() const { return isEmpty() ? (T
)0 : values
[keys
.size()-1]; }
825 K
firstKey() const { return isEmpty() ? 0 : keys
[0]; }
826 K
lastKey() const { return isEmpty() ? 0 : keys
[keys
.size()-1]; }
829 T
at(int i
) const { return values
[i
]; }
830 K
keyAt(int i
) const { return keys
[i
]; }
832 int findNear(K k
) const {
834 return i
>= 0 ? i
: -i
-2;
837 List
<K
, LIST_NonGCObjects
> keys
;
838 List
<T
, valType
> values
;
843 int hi
= keys
.size()-1;
854 return i
; // key found
856 return -(lo
+ 1); // key not found, low is the insertion point
860 #define GCSortedMap SortedMap
863 * Bit vectors are an efficent method of keeping True/False information
864 * on a set of items or conditions. Class BitSet provides functions
865 * to manipulate individual bits in the vector.
867 * Since most vectors are rather small an array of longs is used by
868 * default to house the value of the bits. If more bits are needed
869 * then an array is allocated dynamically outside of this object.
871 * This object is not optimized for a fixed sized bit vector
872 * it instead allows for dynamically growing the bit vector.
877 enum { kUnit
= 8*sizeof(long),
878 kDefaultCapacity
= 4 };
882 capacity
= kDefaultCapacity
;
888 if (capacity
> kDefaultCapacity
)
894 if (capacity
> kDefaultCapacity
)
895 for(int i
=0; i
<capacity
; i
++)
898 for(int i
=0; i
<capacity
; i
++)
902 void set(GC
*gc
, int bitNbr
)
904 int index
= bitNbr
/ kUnit
;
905 int bit
= bitNbr
% kUnit
;
906 if (index
>= capacity
)
909 if (capacity
> kDefaultCapacity
)
910 bits
.ptr
[index
] |= (1<<bit
);
912 bits
.ar
[index
] |= (1<<bit
);
915 void clear(int bitNbr
)
917 int index
= bitNbr
/ kUnit
;
918 int bit
= bitNbr
% kUnit
;
919 if (index
< capacity
)
921 if (capacity
> kDefaultCapacity
)
922 bits
.ptr
[index
] &= ~(1<<bit
);
924 bits
.ar
[index
] &= ~(1<<bit
);
928 bool get(int bitNbr
) const
930 int index
= bitNbr
/ kUnit
;
931 int bit
= bitNbr
% kUnit
;
933 if (index
< capacity
)
935 if (capacity
> kDefaultCapacity
)
936 value
= ( bits
.ptr
[index
] & (1<<bit
) ) ? true : false;
938 value
= ( bits
.ar
[index
] & (1<<bit
) ) ? true : false;
944 // Grow the array until at least newCapacity big
945 void grow(GC
*gc
, int newCapacity
)
947 // create vector that is 2x bigger than requested
949 //MEMTAG("BitVector::Grow - long[]");
950 long* newBits
= (long*)calloc(1, newCapacity
* sizeof(long));
951 //memset(newBits, 0, newCapacity * sizeof(long));
954 if (capacity
> kDefaultCapacity
)
955 for(int i
=0; i
<capacity
; i
++)
956 newBits
[i
] = bits
.ptr
[i
];
958 for(int i
=0; i
<capacity
; i
++)
959 newBits
[i
] = bits
.ar
[i
];
961 // in with the new out with the old
962 if (capacity
> kDefaultCapacity
)
966 capacity
= newCapacity
;
969 // by default we use the array, but if the vector
970 // size grows beyond kDefaultCapacity we allocate
971 // space dynamically.
975 long ar
[kDefaultCapacity
];