/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version 1.1 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain
 * a copy of the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * The Original Code is [Open Source Virtual Machine.]
 *
 * The Initial Developer of the Original Code is Adobe System Incorporated. Portions created
 * by the Initial Developer are Copyright (C)[ 2004-2006 ] Adobe Systems Incorporated. All Rights
 * Reserved.
 *
 * Contributor(s): Adobe AS3 Team
 *                 Andreas Gal <gal@mozilla.com>
 *                 Asko Tontti <atontti@cc.hut.fi>
 *
 * Alternatively, the contents of this file may be used under the terms of either the GNU
 * General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public
 * License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the
 * LGPL are applicable instead of those above. If you wish to allow use of your version of this
 * file only under the terms of either the GPL or the LGPL, and not to allow others to use your
 * version of this file under the terms of the MPL, indicate your decision by deleting provisions
 * above and replace them with the notice and other provisions required by the GPL or the
 * LGPL. If you do not delete the provisions above, a recipient may use your version of this file
 * under the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifndef avm_h___
#define avm_h___

#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#if defined(AVMPLUS_UNIX)
#include <unistd.h>
#include <sys/mman.h>
#endif

#include "jstypes.h"

#define FASTCALL JS_FASTCALL
#if defined(JS_NO_FASTCALL)
#define NJ_NO_FASTCALL
#if defined(AVMPLUS_IA32)
#define SIMULATE_FASTCALL(lr, state_ptr, frag_ptr, func_addr)   \
    asm volatile(                                                \
        "call *%%esi"                                            \
        : "=a" (lr)                                              \
        : "c" (state_ptr), "d" (frag_ptr), "S" (func_addr)       \
        : "memory", "cc"                                         \
    );
#endif /* defined(AVMPLUS_IA32) */
#endif /* defined(JS_NO_FASTCALL) */
#ifdef WIN32
#include <windows.h>
#endif

#if defined(DEBUG) || defined(_MSC_VER) && _MSC_VER < 1400
#if !defined _DEBUG
#define _DEBUG
#endif
#define NJ_VERBOSE 1
#define NJ_PROFILE 1
#include <stdarg.h>
#endif
#ifdef _DEBUG
void NanoAssertFail();
#endif

#define AvmAssert(x) assert(x)
#define AvmAssertMsg(x, y)
#define AvmDebugLog(x) printf x
#ifdef _MSC_VER
/*
 * Can we just take a moment to think about what it means that MSVC doesn't
 * have stdint.h in 2008? Thanks for your time.
 */
typedef JSUint8  uint8_t;
typedef JSInt8   int8_t;
typedef JSUint16 uint16_t;
typedef JSInt16  int16_t;
typedef JSUint32 uint32_t;
typedef JSInt32  int32_t;
typedef JSUint64 uint64_t;
typedef JSInt64  int64_t;
#else
#include <stdint.h>
#endif
#if defined(AVMPLUS_IA32)
#if defined(_MSC_VER)
__declspec(naked) static inline __int64 rdtsc()
{
    __asm
    {
        rdtsc;
        ret;
    }
}
#elif defined(SOLARIS)
static inline unsigned long long rdtsc(void)
{
    unsigned long long int x;
    asm volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}
#elif defined(__i386__)
static __inline__ unsigned long long rdtsc(void)
{
    unsigned long long int x;
    __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
    return x;
}
#endif /* compilers */

#elif defined(__x86_64__)

static __inline__ uint64_t rdtsc(void)
{
    unsigned hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}
#elif defined(__powerpc__)

static __inline__ unsigned long long rdtsc(void)
{
    unsigned long long int result = 0;
    unsigned long int upper, lower, tmp;
    __asm__ volatile(
        "0:                 \n"
        "\tmftbu   %0       \n"
        "\tmftb    %1       \n"
        "\tmftbu   %2       \n"
        "\tcmpw    %2,%0    \n"
        "\tbne     0b       \n"
        : "=r"(upper), "=r"(lower), "=r"(tmp)
        );
    result = upper;
    result = result << 32;
    result = result | lower;

    return result;
}

#endif /* architecture */
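
/*
 * Illustrative sketch (not part of the original header): on the architectures
 * covered above, rdtsc() returns the raw CPU timestamp counter, so a code
 * region can be timed roughly like this:
 *
 *   uint64_t t0 = rdtsc();
 *   // ... code under measurement ...
 *   uint64_t cycles = rdtsc() - t0;
 */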
struct JSContext;

namespace avmplus {

    class GC;

    class GCObject
    {
    public:
        inline void*
        operator new(size_t size, GC* gc)
        {
            return calloc(1, size);
        }

        static void operator delete (void *gcObject)
        {
            free(gcObject);
        }
    };

    #define MMGC_SUBCLASS_DECL : public avmplus::GCObject

    class GCFinalizedObject : public GCObject
    {
    public:
        static void operator delete (void *gcObject)
        {
            free(gcObject);
        }
    };
    class GCHeap
    {
    public:
        int32_t kNativePageSize;

        GCHeap()
        {
#if defined _SC_PAGE_SIZE
            kNativePageSize = sysconf(_SC_PAGE_SIZE);
#else
            kNativePageSize = 4096; // @todo: what is this?
#endif
        }

        inline void*
        Alloc(uint32_t pages)
        {
#ifdef XP_WIN
            return VirtualAlloc(NULL,
                                pages * kNativePageSize,
                                MEM_COMMIT | MEM_RESERVE,
                                PAGE_EXECUTE_READWRITE);
#elif defined AVMPLUS_UNIX
            /*
             * Don't use normal heap with mprotect+PROT_EXEC for executable code.
             * SELinux and friends don't allow this.
             */
            return mmap(NULL,
                        pages * kNativePageSize,
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANON,
                        -1, 0);
#else
            return valloc(pages * kNativePageSize);
#endif
        }

        inline void
        Free(void* p, uint32_t pages)
        {
#ifdef XP_WIN
            VirtualFree(p, 0, MEM_RELEASE);
#elif defined AVMPLUS_UNIX
#if defined SOLARIS
            munmap((char*)p, pages * kNativePageSize);
#else
            munmap(p, pages * kNativePageSize);
#endif
#else
            free(p);
#endif
        }
    };
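
    /*
     * Illustrative sketch (not part of the original header): GCHeap hands out
     * whole pages mapped readable/writable/executable, which is what nanojit
     * needs for generated code. A hypothetical caller might do:
     *
     *   GCHeap* heap = GC::GetGCHeap();
     *   void* code = heap->Alloc(2);   // two native pages, RWX
     *   // ... emit machine code into `code` ...
     *   heap->Free(code, 2);           // sized in pages, not bytes
     */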
    class GC
    {
        static GCHeap heap;

    public:
        /**
         * flags to be passed as second argument to alloc
         */
        enum AllocFlags
        {
            kZero=1,
            kContainsPointers=2,
            kFinalize=4,
            kRCObject=8
        };

        static inline void*
        Alloc(uint32_t bytes, int flags=kZero)
        {
            if (flags & kZero)
                return calloc(1, bytes);
            else
                return malloc(bytes);
        }

        static inline void
        Free(void* p)
        {
            free(p);
        }

        static inline GCHeap*
        GetGCHeap()
        {
            return &heap;
        }
    };
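
    /*
     * Illustrative sketch (not part of the original header): the AllocFlags
     * above form a bitmask, so a zeroed allocation that the collector should
     * scan for pointers would be requested roughly as (Thing is a placeholder
     * type):
     *
     *   void* p = GC::Alloc(sizeof(Thing), GC::kZero | GC::kContainsPointers);
     *   GC::Free(p);
     *
     * In this shim only kZero actually changes behavior (calloc vs. malloc);
     * the other flags are accepted but ignored.
     */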
    #define DWB(x) x
    #define DRCWB(x) x
    #define WB(gc, container, addr, value) do { *(addr) = (value); } while(0)
    #define WBRC(gc, container, addr, value) do { *(addr) = (value); } while(0)

    #define MMGC_MEM_TYPE(x)
    typedef int FunctionID;

    class String
    {
    };

    typedef class String AvmString;

    class StringNullTerminatedUTF8
    {
        const char* cstr;

    public:
        StringNullTerminatedUTF8(GC* gc, String* s)
        {
            cstr = strdup((const char*)s);
        }

        ~StringNullTerminatedUTF8()
        {
            free((void*)cstr);
        }

        inline
        const char* c_str()
        {
            return cstr;
        }
    };

    typedef String* Stringp;
    class Config
    {
    public:
        Config() {
            memset(this, 0, sizeof(Config));
#ifdef DEBUG
            verbose = getenv("TRACEMONKEY") && strstr(getenv("TRACEMONKEY"), "verbose");
            verbose_addrs = 1;
            verbose_exits = 1;
            verbose_live = 1;
            show_stats = 1;
#endif
#if defined (AVMPLUS_AMD64)
            sse2 = true;
            use_cmov = true;
#endif
            tree_opt = 0;
        }

        uint32_t tree_opt:1;
        uint32_t quiet_opt:1;
        uint32_t verbose:1;
        uint32_t verbose_addrs:1;
        uint32_t verbose_live:1;
        uint32_t verbose_exits:1;
        uint32_t show_stats:1;

#if defined (AVMPLUS_IA32) || defined(AVMPLUS_AMD64)
        bool sse2;
        bool use_cmov;
#endif
    };

    static const int kstrconst_emptyString = 0;
    class AvmInterpreter
    {
        class Labels {
        public:
            const char* format(const void* ip)
            {
                static char buf[33];
                sprintf(buf, "%p", ip);
                return buf;
            }
        };

        Labels _labels;
    public:
        Labels* labels;

        AvmInterpreter()
        {
            labels = &_labels;
        }
    };
    class AvmConsole
    {
    public:
        AvmConsole& operator<<(const char* s)
        {
            fprintf(stdout, "%s", s);
            return *this;
        }
    };
    class AvmCore
    {
    public:
        AvmInterpreter interp;
        AvmConsole console;

        static Config config;
        static GC* gc;
        static String* k_str[];

#if defined (AVMPLUS_IA32) || defined(AVMPLUS_AMD64)
        static inline bool
        use_sse2()
        {
            return config.sse2;
        }

        static inline bool
        use_cmov()
        {
            return config.use_cmov;
        }
#endif

        static inline bool
        quiet_opt()
        {
            return config.quiet_opt;
        }

        static inline bool
        verbose()
        {
            return config.verbose;
        }

        static inline GC*
        GetGC()
        {
            return gc;
        }

        static inline String* newString(const char* cstr) {
            return (String*)strdup(cstr);
        }

        static inline void freeString(String* str) {
            return free((char*)str);
        }
    };
    class OSDep
    {
    public:
        static inline void
        getDate()
        {
        }
    };
    /**
     * The List<T> template implements a simple List, which can
     * be templated to support different types.
     *
     * Elements can be added to the end and modified in the middle,
     * but no holes are allowed; that is, for set(n, v) to work,
     * size() must be > n.
     *
     * Note that [] operators are provided, and you can violate the
     * set properties using them. If you want a real list, don't use
     * the [] operators; if you want a general purpose array, use
     * the [] operators.
     */
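
    /*
     * Illustrative usage sketch (not part of the original header), assuming a
     * GC* named `gc` is available:
     *
     *   List<int, LIST_NonGCObjects> list(gc);
     *   uint32_t i = list.add(7);    // appends; returns the new index (0)
     *   list.set(i, 42);             // ok because size() > i
     *   int last = list.removeLast();
     */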
    enum ListElementType {
        LIST_NonGCObjects = 0,
        LIST_GCObjects = 1,
        LIST_RCObjects = 2
    };
    template <typename T, ListElementType kElementType>
    class List
    {
    public:
        enum { kInitialCapacity = 128 };

        List(GC *_gc, uint32_t _capacity=kInitialCapacity) : data(NULL), len(0), capacity(0)
        {
            ensureCapacity(_capacity);
        }

        ~List()
        {
            //clear();
            destroy();
            // zero out in case we are part of an RCObject
            len = 0;
        }

        inline void destroy()
        {
            if (data)
                free(data);
        }

        const T *getData() const { return data; }

        // 'this' steals the guts of 'that' and 'that' gets reset.
        void FASTCALL become(List& that)
        {
            this->destroy();

            this->data = that.data;
            this->len = that.len;
            this->capacity = that.capacity;

            that.data = 0;
            that.len = 0;
            that.capacity = 0;
        }
        uint32_t FASTCALL add(T value)
        {
            if (len >= capacity) {
                grow();
            }
            wb(len++, value);
            return len-1;
        }

        inline bool isEmpty() const
        {
            return len == 0;
        }

        inline uint32_t size() const
        {
            return len;
        }

        inline T get(uint32_t index) const
        {
            AvmAssert(index < len);
            return *(T*)(data + index);
        }

        void FASTCALL set(uint32_t index, T value)
        {
            AvmAssert(index < capacity);
            if (index >= len)
            {
                len = index+1;
            }
            AvmAssert(len <= capacity);
            wb(index, value);
        }

        void add(const List<T, kElementType>& l)
        {
            ensureCapacity(len+l.size());
            // FIXME: make RCObject version
            AvmAssert(kElementType != LIST_RCObjects);
            arraycopy(l.getData(), 0, data, len, l.size());
            len += l.size();
        }

        inline void clear()
        {
            zero_range(0, len);
            len = 0;
        }
        int FASTCALL indexOf(T value) const
        {
            for(uint32_t i=0; i<len; i++)
                if (get(i) == value)
                    return i;
            return -1;
        }

        int FASTCALL lastIndexOf(T value) const
        {
            for(int32_t i=len-1; i>=0; i--)
                if (get(i) == value)
                    return i;
            return -1;
        }

        inline T last() const
        {
            return get(len-1);
        }

        T FASTCALL removeLast()
        {
            if(isEmpty())
                return undef_list_val();
            T t = get(len-1);
            set(len-1, undef_list_val());
            len--;
            return t;
        }

        inline T operator[](uint32_t index) const
        {
            AvmAssert(index < capacity);
            return get(index);
        }
        void FASTCALL ensureCapacity(uint32_t cap)
        {
            if (cap > capacity) {
                if (data == NULL) {
                    data = (T*)calloc(1, factor(cap));
                } else {
                    data = (T*)realloc(data, factor(cap));
                    zero_range(capacity, cap - capacity);
                }
                capacity = cap;
            }
        }

        void FASTCALL insert(uint32_t index, T value, uint32_t count = 1)
        {
            AvmAssert(index <= len);
            AvmAssert(count > 0);
            ensureCapacity(len+count);
            memmove(data + index + count, data + index, factor(len - index));
            wbzm(index, index+count, value);
            len += count;
        }

        T FASTCALL removeAt(uint32_t index)
        {
            T old = get(index);
            // dec the refcount on the one we're removing
            wb(index, undef_list_val());
            memmove(data + index, data + index + 1, factor(len - index - 1));
            len--;
            return old;
        }
    private:
        void FASTCALL grow()
        {
            // growth is fast at first, then slows at larger list sizes.
            uint32_t newMax = 0;
            const uint32_t curMax = capacity;
            if (curMax == 0)
                newMax = kInitialCapacity;
            else if(curMax > 15)
                newMax = curMax * 3/2;
            else
                newMax = curMax * 2;

            ensureCapacity(newMax);
        }

        void arraycopy(const T* src, int srcStart, T* dst, int dstStart, int nbr)
        {
            // we have 2 cases, either closing a gap or opening it.
            if ((src == dst) && (srcStart > dstStart))
            {
                for(int i=0; i<nbr; i++)
                    dst[i+dstStart] = src[i+srcStart];
            }
            else
            {
                for(int i=nbr-1; i>=0; i--)
                    dst[i+dstStart] = src[i+srcStart];
            }
        }

        inline void do_wb_nongc(T* slot, T value)
        {
            *slot = value;
        }

        inline void do_wb_gc(GCObject** slot, const GCObject** value)
        {
            *slot = (GCObject*)*value;
        }

        void FASTCALL wb(uint32_t index, T value)
        {
            AvmAssert(index < capacity);
            AvmAssert(data != NULL);
            T* slot = &data[index];
            do_wb_nongc(slot, value);
        }
        // multiple wb calls with the same value, and the assumption that the existing
        // value is all zero bits, like
        //   for (uint32_t u = index; u < index_end; ++u)
        //       wb(u, value);
        void FASTCALL wbzm(uint32_t index, uint32_t index_end, T value)
        {
            AvmAssert(index < capacity);
            AvmAssert(index_end <= capacity);
            AvmAssert(index < index_end);
            AvmAssert(data != NULL);
            T* slot = data + index;
            for ( ; index < index_end; ++index, ++slot)
                do_wb_nongc(slot, value);
        }

        inline uint32_t factor(uint32_t index) const
        {
            return index * sizeof(T);
        }

        void FASTCALL zero_range(uint32_t _first, uint32_t _count)
        {
            memset(data + _first, 0, factor(_count));
        }
        // stuff that needs specialization based on the type
        static inline T undef_list_val();

    private:
        List(const List& toCopy);           // unimplemented
        void operator=(const List& that);   // unimplemented

        // ------------------------ DATA SECTION BEGIN
    private:
        T* data;
        uint32_t len;
        uint32_t capacity;
        // ------------------------ DATA SECTION END
    };

    // stuff that needs specialization based on the type
    template<typename T, ListElementType kElementType>
    /* static */ inline T List<T, kElementType>::undef_list_val() { return T(0); }
    /**
     * The SortedMap<K,T> template implements an object that
     * maps keys to values. The keys are sorted
     * from smallest to largest in the map. Time of operations
     * is as follows:
     *   put() is O(1) if the key is higher than any existing
     *         key; O(logN) if the key already exists,
     *         and O(N) otherwise.
     *   get() is an O(logN) binary search.
     *
     * No duplicates are allowed.
     */
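
    /*
     * Illustrative usage sketch (not part of the original header), assuming a
     * GC* named `gc` is available:
     *
     *   SortedMap<int, int, LIST_NonGCObjects> map(gc);
     *   map.put(3, 30);              // keys added in ascending order: O(1)
     *   map.put(1, 10);              // smaller key: O(N) insert to keep order
     *   int v = map.get(3);          // O(logN) binary search, v == 30
     */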
    template <class K, class T, ListElementType valType>
    class SortedMap : public GCObject
    {
    public:
        enum { kInitialCapacity= 64 };

        SortedMap(GC* gc, int _capacity=kInitialCapacity)
          : keys(gc, _capacity), values(gc, _capacity)
        {
        }

        bool isEmpty() const
        {
            return keys.size() == 0;
        }

        int size() const
        {
            return keys.size();
        }

        void clear()
        {
            keys.clear();
            values.clear();
        }

        void destroy()
        {
            keys.destroy();
            values.destroy();
        }
        T put(K k, T v)
        {
            if (keys.size() == 0 || k > keys.last())
            {
                keys.add(k);
                values.add(v);
                return (T)v;
            }
            else
            {
                int i = find(k);
                if (i >= 0)
                {
                    T old = values[i];
                    keys.set(i, k);
                    values.set(i, v);
                    return old;
                }
                else
                {
                    i = -i - 1; // recover the insertion point
                    AvmAssert(keys.size() != (uint32_t)i);
                    keys.insert(i, k);
                    values.insert(i, v);
                    return v;
                }
            }
        }
        T get(K k) const
        {
            int i = find(k);
            return i >= 0 ? values[i] : 0;
        }

        bool get(K k, T& v) const
        {
            int i = find(k);
            if (i >= 0)
            {
                v = values[i];
                return true;
            }
            return false;
        }

        bool containsKey(K k) const
        {
            int i = find(k);
            return (i >= 0) ? true : false;
        }

        T remove(K k)
        {
            int i = find(k);
            return removeAt(i);
        }

        T removeAt(int i)
        {
            T old = values.removeAt(i);
            keys.removeAt(i);
            return old;
        }

        T removeFirst() { return isEmpty() ? (T)0 : removeAt(0); }
        T removeLast()  { return isEmpty() ? (T)0 : removeAt(keys.size()-1); }
        T first() const { return isEmpty() ? (T)0 : values[0]; }
        T last()  const { return isEmpty() ? (T)0 : values[keys.size()-1]; }

        K firstKey() const { return isEmpty() ? 0 : keys[0]; }
        K lastKey()  const { return isEmpty() ? 0 : keys[keys.size()-1]; }

        // iterator
        T at(int i) const { return values[i]; }
        K keyAt(int i) const { return keys[i]; }

        int findNear(K k) const {
            int i = find(k);
            return i >= 0 ? i : -i-2;
        }
    protected:
        List<K, LIST_NonGCObjects> keys;
        List<T, valType> values;

        int find(K k) const
        {
            int lo = 0;
            int hi = keys.size()-1;

            while (lo <= hi)
            {
                int i = (lo + hi)/2;
                K m = keys[i];
                if (k > m)
                    lo = i + 1;
                else if (k < m)
                    hi = i - 1;
                else
                    return i; // key found
            }
            return -(lo + 1); // key not found, low is the insertion point
        }
    };

    #define GCSortedMap SortedMap
    /**
     * Bit vectors are an efficient method of keeping True/False information
     * on a set of items or conditions. Class BitSet provides functions
     * to manipulate individual bits in the vector.
     *
     * Since most vectors are rather small, an array of longs is used by
     * default to house the value of the bits. If more bits are needed,
     * then an array is allocated dynamically outside of this object.
     *
     * This object is not optimized for a fixed sized bit vector;
     * it instead allows for dynamically growing the bit vector.
     */
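
    /*
     * Illustrative usage sketch (not part of the original header), assuming a
     * GC* named `gc` is available:
     *
     *   BitSet live;
     *   live.set(gc, 300);           // grows the inline array into a heap block
     *   bool on = live.get(300);     // true
     *   live.clear(300);             // false again
     */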
    class BitSet
    {
    public:
        enum {  kUnit = 8*sizeof(long),
                kDefaultCapacity = 4 };

        BitSet()
        {
            capacity = kDefaultCapacity;
            reset();
        }

        ~BitSet()
        {
            if (capacity > kDefaultCapacity)
                free(bits.ptr);
        }

        void reset()
        {
            if (capacity > kDefaultCapacity)
                for(int i=0; i<capacity; i++)
                    bits.ptr[i] = 0;
            else
                for(int i=0; i<capacity; i++)
                    bits.ar[i] = 0;
        }
        void set(GC *gc, int bitNbr)
        {
            int index = bitNbr / kUnit;
            int bit = bitNbr % kUnit;
            if (index >= capacity)
                grow(gc, index+1);

            if (capacity > kDefaultCapacity)
                bits.ptr[index] |= (1<<bit);
            else
                bits.ar[index] |= (1<<bit);
        }

        void clear(int bitNbr)
        {
            int index = bitNbr / kUnit;
            int bit = bitNbr % kUnit;
            if (index < capacity)
            {
                if (capacity > kDefaultCapacity)
                    bits.ptr[index] &= ~(1<<bit);
                else
                    bits.ar[index] &= ~(1<<bit);
            }
        }

        bool get(int bitNbr) const
        {
            int index = bitNbr / kUnit;
            int bit = bitNbr % kUnit;
            bool value = false;
            if (index < capacity)
            {
                if (capacity > kDefaultCapacity)
                    value = ( bits.ptr[index] & (1<<bit) ) ? true : false;
                else
                    value = ( bits.ar[index] & (1<<bit) ) ? true : false;
            }
            return value;
        }
    private:
        // Grow the array until at least newCapacity big
        void grow(GC *gc, int newCapacity)
        {
            // create vector that is 2x bigger than requested
            newCapacity *= 2;
            //MEMTAG("BitVector::Grow - long[]");
            long* newBits = (long*)calloc(1, newCapacity * sizeof(long));
            //memset(newBits, 0, newCapacity * sizeof(long));

            // copy the old one
            if (capacity > kDefaultCapacity)
                for(int i=0; i<capacity; i++)
                    newBits[i] = bits.ptr[i];
            else
                for(int i=0; i<capacity; i++)
                    newBits[i] = bits.ar[i];

            // in with the new, out with the old
            if (capacity > kDefaultCapacity)
                free(bits.ptr);

            bits.ptr = newBits;
            capacity = newCapacity;
        }

        // by default we use the array, but if the vector
        // size grows beyond kDefaultCapacity we allocate
        // space dynamically.
        int capacity;
        union
        {
            long ar[kDefaultCapacity];
            long* ptr;
        }
        bits;
    };
}

#endif