On x86 compilers without fastcall, simulate it when invoking traces and un-simulate...
[wine-gecko.git] / js / src / nanojit / avmplus.h
blob1a916c1bd47f3ca059bb211b54fbf4173d31b83f
1 /* ***** BEGIN LICENSE BLOCK *****
2 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
4 * The contents of this file are subject to the Mozilla Public License Version 1.1 (the
5 * "License"); you may not use this file except in compliance with the License. You may obtain
6 * a copy of the License at http://www.mozilla.org/MPL/
7 *
8 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
9 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
10 * language governing rights and limitations under the License.
12 * The Original Code is [Open Source Virtual Machine.]
14 * The Initial Developer of the Original Code is Adobe System Incorporated. Portions created
15 * by the Initial Developer are Copyright (C)[ 2004-2006 ] Adobe Systems Incorporated. All Rights
16 * Reserved.
18 * Contributor(s): Adobe AS3 Team
19 * Andreas Gal <gal@mozilla.com>
20 * Asko Tontti <atontti@cc.hut.fi>
22 * Alternatively, the contents of this file may be used under the terms of either the GNU
23 * General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public
24 * License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the
25 * LGPL are applicable instead of those above. If you wish to allow use of your version of this
26 * file only under the terms of either the GPL or the LGPL, and not to allow others to use your
27 * version of this file under the terms of the MPL, indicate your decision by deleting provisions
28 * above and replace them with the notice and other provisions required by the GPL or the
29 * LGPL. If you do not delete the provisions above, a recipient may use your version of this file
30 * under the terms of any one of the MPL, the GPL or the LGPL.
32 ***** END LICENSE BLOCK ***** */
34 #ifndef avm_h___
35 #define avm_h___
37 #include <assert.h>
38 #include <string.h>
39 #include <stdio.h>
40 #include <stdlib.h>
42 #if defined(AVMPLUS_LINUX) || defined(DARWIN) || defined(__FreeBSD__)
43 #include <unistd.h>
44 #include <sys/mman.h>
45 #endif
47 #include "jstypes.h"
49 #define FASTCALL JS_FASTCALL
#if defined(JS_NO_FASTCALL)
#define NJ_NO_FASTCALL
#if defined(AVMPLUS_IA32)
/*
 * Simulate the x86 fastcall convention when invoking a trace: the first two
 * arguments go in ecx ("c") and edx ("d"), the callee address sits in esi
 * ("S") and is called indirectly; the result comes back in eax ("=a" -> lr).
 * The "memory" and "cc" clobbers keep the compiler from caching values across
 * the call.  (The scrape had dropped the closing ");" of the asm statement.)
 */
#define SIMULATE_FASTCALL(lr, state_ptr, frag_ptr, func_addr)   \
    asm volatile(                                               \
        "call *%%esi"                                           \
        : "=a" (lr)                                             \
        : "c" (state_ptr), "d" (frag_ptr), "S" (func_addr)      \
        : "memory", "cc"                                        \
    );
#endif /* defined(AVMPLUS_IA32) */
#endif /* defined(JS_NO_FASTCALL) */
64 #ifdef WIN32
65 #include <windows.h>
66 #endif
68 #ifdef DEBUG
69 #if !defined _DEBUG
70 #define _DEBUG
71 #endif
72 #define NJ_VERBOSE
73 #define NJ_PROFILE
74 #include <stdarg.h>
75 #endif
77 #ifdef _DEBUG
78 void NanoAssertFail();
79 #endif
81 #define AvmAssert(x) assert(x)
82 #define AvmAssertMsg(x, y)
83 #define AvmDebugLog(x) printf x
85 #ifdef _MSC_VER
/*
 * Can we just take a moment to think about what it means that MSVC doesn't
 * have stdint.h in 2008?  Thanks for your time.
 */
90 typedef JSUint8 uint8_t;
91 typedef JSInt8 int8_t;
92 typedef JSUint16 uint16_t;
93 typedef JSInt16 int16_t;
94 typedef JSUint32 uint32_t;
95 typedef JSInt32 int32_t;
96 typedef JSUint64 uint64_t;
97 typedef JSInt64 int64_t;
98 #else
99 #include <stdint.h>
100 #endif
#if defined(_MSC_VER) && defined(AVMPLUS_IA32)
/*
 * Read the CPU time-stamp counter (MSVC/x86).  "naked" means the compiler
 * emits no prologue/epilogue, so the function is exactly RDTSC + RET and the
 * 64-bit counter is returned in edx:eax as __int64.
 */
__declspec(naked) static inline __int64 rdtsc()
{
    __asm
    {
        rdtsc;
        ret;
    }
}
#endif
#if defined(__i386__)

/* Read the CPU time-stamp counter (GCC/i386).  0x0f 0x31 is the RDTSC
 * opcode; "=A" binds the edx:eax pair to the 64-bit result. */
static __inline__ unsigned long long rdtsc(void)
{
    unsigned long long int x;
    __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}

#elif defined(__x86_64__)

/* Read the CPU time-stamp counter (GCC/x86-64).  RDTSC still splits the
 * result across eax/edx, so the halves are recombined explicitly. */
static __inline__ uint64_t rdtsc(void)
{
    unsigned hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ( (uint64_t)lo)|( ((uint64_t)hi)<<32 );
}

#elif defined(__powerpc__)

/* Read the time base on PowerPC.  The mftbu/mftb/mftbu loop retries until
 * the upper half is stable, guarding against a carry between the two reads.
 * (A garbled "typedef unsigned long long int unsigned long long;" line from
 * the scrape was dropped -- it was not valid C++.) */
static __inline__ unsigned long long rdtsc(void)
{
    unsigned long long int result = 0;
    unsigned long int upper, lower, tmp;
    __asm__ volatile(
        "0:            \n"
        "\tmftbu %0    \n"
        "\tmftb  %1    \n"
        "\tmftbu %2    \n"
        "\tcmpw  %2,%0 \n"
        "\tbne   0b    \n"
        : "=r"(upper), "=r"(lower), "=r"(tmp)
        );
    result = upper;
    result = result << 32;
    result = result | lower;

    return result;
}

#endif
156 struct JSContext;
158 namespace nanojit
160 class Fragment;
162 enum ExitType {
163 BRANCH_EXIT,
164 LOOP_EXIT,
165 NESTED_EXIT,
166 MISMATCH_EXIT,
167 OOM_EXIT,
168 OVERFLOW_EXIT
171 struct SideExit
173 intptr_t ip_adj;
174 intptr_t sp_adj;
175 intptr_t rp_adj;
176 Fragment *target;
177 Fragment *from;
178 int32_t calldepth;
179 uint32 numGlobalSlots;
180 uint32 numStackSlots;
181 uint32 numStackSlotsBelowCurrentFrame;
182 uint8 *typeMap;
183 ExitType exitType;
184 #if defined NJ_VERBOSE
185 uint32_t sid;
186 #endif
189 class LIns;
191 struct GuardRecord
193 Fragment *target;
194 Fragment *from;
195 void *jmp;
196 void *origTarget;
197 SideExit *exit;
198 GuardRecord *outgoing;
199 GuardRecord *next;
200 LIns *guard;
201 int32_t calldepth;
202 #if defined NJ_VERBOSE
203 uint32_t compileNbr;
204 uint32_t sid;
205 #endif
208 #define GuardRecordSize(g) sizeof(GuardRecord)
209 #define SideExitSize(e) sizeof(SideExit)
/*
 * Minimal stand-ins for the MMgc object base classes: here "GC-managed"
 * objects are just plain heap memory released with free().
 */
class GCObject
{
public:
    static void operator delete (void *gcObject)
    {
        free(gcObject);
    }
};

#define MMGC_SUBCLASS_DECL : public GCObject

class GCFinalizedObject : public GCObject
{
public:
    static void operator delete (void *gcObject)
    {
        free(gcObject);
    }
};
/*
 * Page-granular allocator for executable code memory.  Uses VirtualAlloc on
 * Windows, mmap on Linux/Darwin, and valloc elsewhere.
 */
class GCHeap
{
public:
    int32_t kNativePageSize;

    GCHeap()
    {
#if defined _SC_PAGE_SIZE
        kNativePageSize = sysconf(_SC_PAGE_SIZE);
#else
        kNativePageSize = 4096; // @todo: what is this?
#endif
    }

    // Returns `pages` fresh pages of RWX memory, or NULL/MAP_FAILED on error.
    inline void*
    Alloc(uint32_t pages)
    {
#ifdef XP_WIN
        return VirtualAlloc(NULL,
                            pages * kNativePageSize,
                            MEM_COMMIT | MEM_RESERVE,
                            PAGE_EXECUTE_READWRITE);
#elif defined AVMPLUS_LINUX || defined DARWIN
        /*
         * Don't use normal heap with mprotect+PROT_EXEC for executable code.
         * SELinux and friends don't allow this.
         */
        return mmap(NULL,
                    pages * kNativePageSize,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANON,
                    -1, 0); // fix: anonymous mappings require fd=-1, offset=0
#else
        return valloc(pages * kNativePageSize);
#endif
    }

    // Releases memory obtained from Alloc(); `pages` is ignored on Windows
    // because VirtualFree(MEM_RELEASE) frees the whole reservation.
    inline void
    Free(void* p, uint32_t pages)
    {
#ifdef XP_WIN
        VirtualFree(p, 0, MEM_RELEASE);
#elif defined AVMPLUS_LINUX || defined DARWIN
        munmap(p, pages * kNativePageSize);
#else
        free(p);
#endif
    }
};
284 class GC
286 static GCHeap heap;
288 public:
289 static inline void*
290 Alloc(uint32_t bytes)
292 return calloc(1, bytes);
295 static inline void
296 Free(void* p)
298 free(p);
301 static inline GCHeap*
302 GetGCHeap()
304 return &heap;
308 inline void*
309 operator new(size_t size, GC* gc)
311 return calloc(1, size);
314 inline void
315 operator delete(void* p)
317 free(p);
320 #define DWB(x) x
321 #define DRCWB(x) x
323 #define MMGC_MEM_TYPE(x)
325 typedef int FunctionID;
327 namespace avmplus
329 struct InterpState
331 void* sp; /* native stack pointer, stack[0] is spbase[0] */
332 void* rp; /* call stack pointer */
333 void* gp; /* global frame pointer */
334 JSContext *cx; /* current VM context handle */
335 void* eos; /* first unusable word after the native stack */
336 void* eor; /* first unusable word after the call stack */
337 nanojit::GuardRecord* nestedExit; /* innermost nested guard for NESTED_EXIT exits */
/*
 * Opaque placeholder for the AVM+ String class.  In this shim a String* is
 * really a strdup'd C string (see AvmCore::newString), never a real object.
 */
class String
{
};

typedef class String AvmString;
346 class StringNullTerminatedUTF8
348 const char* cstr;
350 public:
351 StringNullTerminatedUTF8(GC* gc, String* s)
353 cstr = strdup((const char*)s);
356 ~StringNullTerminatedUTF8()
358 free((void*)cstr);
361 inline
362 const char* c_str()
364 return cstr;
368 typedef String* Stringp;
/*
 * Runtime configuration flags, packed as one-bit fields.  Everything starts
 * zeroed; DEBUG builds turn on verbosity based on the TRACEMONKEY env var.
 */
class AvmConfiguration
{
public:
    AvmConfiguration() {
        memset(this, 0, sizeof(AvmConfiguration));
#ifdef DEBUG
        // fix: query the environment once instead of calling getenv twice
        const char* tm = getenv("TRACEMONKEY");
        verbose = tm && strstr(tm, "verbose");
        verbose_addrs = 1;
        verbose_exits = 1;
        verbose_live = 1;
        show_stats = 1;
#endif
        tree_opt = 0;
    }

    uint32_t tree_opt:1;
    uint32_t quiet_opt:1;
    uint32_t verbose:1;
    uint32_t verbose_addrs:1;
    uint32_t verbose_live:1;
    uint32_t verbose_exits:1;
    uint32_t show_stats:1;
};

static const int kstrconst_emptyString = 0;
/*
 * Tiny interpreter facade: its Labels helper formats an instruction pointer
 * as text for verbose output.
 */
class AvmInterpreter
{
    class Labels {
    public:
        // Returns a printable form of ip.  Uses a static buffer, so the
        // result is only valid until the next call (single-threaded use).
        const char* format(const void* ip)
        {
            static char buf[33];
            // fix: bound the write; sprintf could overrun buf on exotic
            // %p widths, snprintf always NUL-terminates within 33 bytes.
            snprintf(buf, sizeof buf, "%p", ip);
            return buf;
        }
    };

    Labels _labels;
public:
    Labels* labels;

    AvmInterpreter()
    {
        labels = &_labels;
    }
};
/* Minimal console that streams C strings straight to stdout. */
class AvmConsole
{
public:
    AvmConsole& operator<<(const char* s)
    {
        fprintf(stdout, "%s", s);
        return *this;
    }
};
429 class AvmCore
431 public:
432 AvmInterpreter interp;
433 AvmConsole console;
435 static AvmConfiguration config;
436 static GC* gc;
437 static String* k_str[];
438 static bool sse2_available;
440 static inline bool
441 use_sse2()
443 return sse2_available;
446 static inline bool
447 quiet_opt()
449 return config.quiet_opt;
452 static inline bool
453 verbose()
455 return config.verbose;
458 static inline GC*
459 GetGC()
461 return gc;
464 static inline String* newString(const char* cstr) {
465 return (String*)strdup(cstr);
/* OS-dependent services; getDate is a deliberate no-op in this shim. */
class OSDep
{
public:
    static inline void
    getDate()
    {
    }
};
/*
 * The List<T> template implements a simple List, which can
 * be templated to support different types.
 *
 * Elements can be added to the end and modified in the middle,
 * but no holes are allowed; that is, for set(n, v) to work,
 * size() > n must hold.
 *
 * Note that [] operators are provided and you can violate the
 * set properties using these operators: if you want a real
 * list don't use the [] operators; if you want a general purpose
 * array, use the [] operators.
 */
492 enum ListElementType { LIST_NonGCObjects, LIST_GCObjects };
494 template <typename T, ListElementType kElementType>
495 class List
497 public:
498 enum { kInitialCapacity = 128 };
500 List(GC *_gc, uint32_t _capacity=kInitialCapacity) : data(NULL), len(0), capacity(0)
502 ensureCapacity(_capacity);
505 ~List()
507 //clear();
508 destroy();
509 // zero out in case we are part of an RCObject
510 len = 0;
513 inline void destroy()
515 if (data)
516 free(data);
519 // 'this' steals the guts of 'that' and 'that' gets reset.
520 void FASTCALL become(List& that)
522 this->destroy();
524 this->data = that.data;
525 this->len = that.len;
526 this->capacity = that.capacity;
528 that.data = 0;
529 that.len = 0;
530 that.capacity = 0;
532 uint32_t FASTCALL add(T value)
534 if (len >= capacity) {
535 grow();
537 wb(len++, value);
538 return len-1;
541 inline bool isEmpty() const
543 return len == 0;
546 inline uint32_t size() const
548 return len;
551 inline T get(uint32_t index) const
553 AvmAssert(index < len);
554 return *(T*)(data + index);
557 void FASTCALL set(uint32_t index, T value)
559 AvmAssert(index < capacity);
560 if (index >= len)
562 len = index+1;
564 AvmAssert(len <= capacity);
565 wb(index, value);
568 inline void clear()
570 zero_range(0, len);
571 len = 0;
574 int FASTCALL indexOf(T value) const
576 for(uint32_t i=0; i<len; i++)
577 if (get(i) == value)
578 return i;
579 return -1;
582 int FASTCALL lastIndexOf(T value) const
584 for(int32_t i=len-1; i>=0; i--)
585 if (get(i) == value)
586 return i;
587 return -1;
590 inline T last()
592 return get(len-1);
595 T FASTCALL removeLast()
597 if(isEmpty())
598 return undef_list_val();
599 T t = get(len-1);
600 set(len-1, undef_list_val());
601 len--;
602 return t;
605 inline T operator[](uint32_t index) const
607 AvmAssert(index < capacity);
608 return get(index);
611 void FASTCALL ensureCapacity(uint32_t cap)
613 if (cap > capacity) {
614 if (data == NULL) {
615 data = (T*)calloc(1, factor(cap));
616 } else {
617 data = (T*)realloc(data, factor(cap));
618 zero_range(capacity, cap - capacity);
620 capacity = cap;
624 void FASTCALL insert(uint32_t index, T value, uint32_t count = 1)
626 AvmAssert(index <= len);
627 AvmAssert(count > 0);
628 ensureCapacity(len+count);
629 memmove(data + index + count, data + index, factor(len - index));
630 wbzm(index, index+count, value);
631 len += count;
634 T FASTCALL removeAt(uint32_t index)
636 T old = get(index);
637 // dec the refcount on the one we're removing
638 wb(index, undef_list_val());
639 memmove(data + index, data + index + 1, factor(len - index - 1));
640 len--;
641 return old;
644 private:
645 void FASTCALL grow()
647 // growth is fast at first, then slows at larger list sizes.
648 uint32_t newMax = 0;
649 const uint32_t curMax = capacity;
650 if (curMax == 0)
651 newMax = kInitialCapacity;
652 else if(curMax > 15)
653 newMax = curMax * 3/2;
654 else
655 newMax = curMax * 2;
657 ensureCapacity(newMax);
660 inline void do_wb_nongc(T* slot, T value)
662 *slot = value;
665 inline void do_wb_gc(GCObject** slot, const GCObject** value)
667 *slot = (GCObject*)*value;
670 void FASTCALL wb(uint32_t index, T value)
672 AvmAssert(index < capacity);
673 AvmAssert(data != NULL);
674 T* slot = &data[index];
675 do_wb_nongc(slot, value);
678 // multiple wb call with the same value, and assumption that existing value is all zero bits,
679 // like
680 // for (uint32_t u = index; u < index_end; ++u)
681 // wb(u, value);
682 void FASTCALL wbzm(uint32_t index, uint32_t index_end, T value)
684 AvmAssert(index < capacity);
685 AvmAssert(index_end <= capacity);
686 AvmAssert(index < index_end);
687 AvmAssert(data != NULL);
688 T* slot = data + index;
689 for ( ; index < index_end; ++index, ++slot)
690 do_wb_nongc(slot, value);
693 inline uint32_t factor(uint32_t index) const
695 return index * sizeof(T);
698 void FASTCALL zero_range(uint32_t _first, uint32_t _count)
700 memset(data + _first, 0, factor(_count));
703 // stuff that needs specialization based on the type
704 static inline T undef_list_val();
706 private:
707 List(const List& toCopy); // unimplemented
708 void operator=(const List& that); // unimplemented
710 // ------------------------ DATA SECTION BEGIN
711 private:
712 T* data;
713 uint32_t len;
714 uint32_t capacity;
715 // ------------------------ DATA SECTION END
719 // stuff that needs specialization based on the type
720 template<typename T, ListElementType kElementType>
721 /* static */ inline T List<T, kElementType>::undef_list_val() { return T(0); }
/*
 * The SortedMap<K,T> template implements an object that
 * maps keys to values.  The keys are sorted
 * from smallest to largest in the map.  Time of operations
 * is as follows:
 *   put() is O(1) if the key is higher than any existing
 *         key; O(logN) if the key already exists,
 *         and O(N) otherwise.
 *   get() is an O(logN) binary search.
 *
 * No duplicates are allowed.
 */
735 template <class K, class T, ListElementType valType>
736 class SortedMap
738 public:
739 enum { kInitialCapacity= 64 };
741 SortedMap(GC* gc, int _capacity=kInitialCapacity)
742 : keys(gc, _capacity), values(gc, _capacity)
746 bool isEmpty() const
748 return keys.size() == 0;
751 int size() const
753 return keys.size();
756 void clear()
758 keys.clear();
759 values.clear();
762 void destroy()
764 keys.destroy();
765 values.destroy();
768 T put(K k, T v)
770 if (keys.size() == 0 || k > keys.last())
772 keys.add(k);
773 values.add(v);
774 return (T)v;
776 else
778 int i = find(k);
779 if (i >= 0)
781 T old = values[i];
782 keys.set(i, k);
783 values.set(i, v);
784 return old;
786 else
788 i = -i - 1; // recover the insertion point
789 AvmAssert(keys.size() != (uint32_t)i);
790 keys.insert(i, k);
791 values.insert(i, v);
792 return v;
797 T get(K k) const
799 int i = find(k);
800 return i >= 0 ? values[i] : 0;
803 bool get(K k, T& v) const
805 int i = find(k);
806 if (i >= 0)
808 v = values[i];
809 return true;
811 return false;
814 bool containsKey(K k) const
816 int i = find(k);
817 return (i >= 0) ? true : false;
820 T remove(K k)
822 int i = find(k);
823 return removeAt(i);
826 T removeAt(int i)
828 T old = values.removeAt(i);
829 keys.removeAt(i);
830 return old;
833 T removeFirst() { return isEmpty() ? (T)0 : removeAt(0); }
834 T removeLast() { return isEmpty() ? (T)0 : removeAt(keys.size()-1); }
835 T first() const { return isEmpty() ? (T)0 : values[0]; }
836 T last() const { return isEmpty() ? (T)0 : values[keys.size()-1]; }
838 K firstKey() const { return isEmpty() ? 0 : keys[0]; }
839 K lastKey() const { return isEmpty() ? 0 : keys[keys.size()-1]; }
841 // iterator
842 T at(int i) const { return values[i]; }
843 K keyAt(int i) const { return keys[i]; }
845 int findNear(K k) const {
846 int i = find(k);
847 return i >= 0 ? i : -i-2;
849 protected:
850 List<K, LIST_NonGCObjects> keys;
851 List<T, valType> values;
853 int find(K k) const
855 int lo = 0;
856 int hi = keys.size()-1;
858 while (lo <= hi)
860 int i = (lo + hi)/2;
861 K m = keys[i];
862 if (k > m)
863 lo = i + 1;
864 else if (k < m)
865 hi = i - 1;
866 else
867 return i; // key found
869 return -(lo + 1); // key not found, low is the insertion point
873 #define GCSortedMap SortedMap
/*
 * Bit vectors are an efficient method of keeping True/False information
 * on a set of items or conditions.  Class BitSet provides functions
 * to manipulate individual bits in the vector.
 *
 * Since most vectors are rather small, an array of longs is used by
 * default to house the value of the bits.  If more bits are needed
 * then an array is allocated dynamically outside of this object.
 *
 * This object is not optimized for a fixed-size bit vector;
 * it instead allows for dynamically growing the bit vector.
 */
887 class BitSet
889 public:
890 enum { kUnit = 8*sizeof(long),
891 kDefaultCapacity = 4 };
893 BitSet()
895 capacity = kDefaultCapacity;
896 reset();
899 ~BitSet()
901 if (capacity > kDefaultCapacity)
902 delete bits.ptr;
905 void reset()
907 if (capacity > kDefaultCapacity)
908 for(int i=0; i<capacity; i++)
909 bits.ptr[i] = 0;
910 else
911 for(int i=0; i<capacity; i++)
912 bits.ar[i] = 0;
915 void set(GC *gc, int bitNbr)
917 int index = bitNbr / kUnit;
918 int bit = bitNbr % kUnit;
919 if (index >= capacity)
920 grow(gc, index+1);
922 if (capacity > kDefaultCapacity)
923 bits.ptr[index] |= (1<<bit);
924 else
925 bits.ar[index] |= (1<<bit);
928 void clear(int bitNbr)
930 int index = bitNbr / kUnit;
931 int bit = bitNbr % kUnit;
932 if (index < capacity)
934 if (capacity > kDefaultCapacity)
935 bits.ptr[index] &= ~(1<<bit);
936 else
937 bits.ar[index] &= ~(1<<bit);
941 bool get(int bitNbr) const
943 int index = bitNbr / kUnit;
944 int bit = bitNbr % kUnit;
945 bool value = false;
946 if (index < capacity)
948 if (capacity > kDefaultCapacity)
949 value = ( bits.ptr[index] & (1<<bit) ) ? true : false;
950 else
951 value = ( bits.ar[index] & (1<<bit) ) ? true : false;
953 return value;
956 private:
957 // Grow the array until at least newCapacity big
958 void grow(GC *gc, int newCapacity)
960 // create vector that is 2x bigger than requested
961 newCapacity *= 2;
962 //MEMTAG("BitVector::Grow - long[]");
963 long* newBits = (long*)calloc(1, newCapacity * sizeof(long));
964 //memset(newBits, 0, newCapacity * sizeof(long));
966 // copy the old one
967 if (capacity > kDefaultCapacity)
968 for(int i=0; i<capacity; i++)
969 newBits[i] = bits.ptr[i];
970 else
971 for(int i=0; i<capacity; i++)
972 newBits[i] = bits.ar[i];
974 // in with the new out with the old
975 if (capacity > kDefaultCapacity)
976 free(bits.ptr);
978 bits.ptr = newBits;
979 capacity = newCapacity;
982 // by default we use the array, but if the vector
983 // size grows beyond kDefaultCapacity we allocate
984 // space dynamically.
985 int capacity;
986 union
988 long ar[kDefaultCapacity];
989 long* ptr;
991 bits;
995 #endif