/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
 *
 * The Initial Developer of the Original Code is
 *   Brendan Eich <brendan@mozilla.org>
 *
 * Contributor(s):
 *   Andreas Gal <gal@mozilla.com>
 *   Mike Shaver <shaver@mozilla.org>
 *   David Anderson <danderson@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#ifndef jstracer_h___
#define jstracer_h___

#include "jsbuiltins.h"
#include "jscompartment.h"

#ifdef JS_TRACER

namespace js {
template <typename T>
class Queue {
    T*                  _data;
    unsigned            _len;
    unsigned            _max;
    nanojit::Allocator* alloc;

  public:
    void ensure(unsigned size) {
        _max = JS_MAX(_max * 2, size);
        if (alloc) {
            T* tmp = new (*alloc) T[_max];
            memcpy(tmp, _data, _len * sizeof(T));
            _data = tmp;
        } else {
            _data = (T*)js_realloc(_data, _max * sizeof(T));
        }
        memset(&_data[_len], 0xcd, _max - _len);
    }

    Queue(nanojit::Allocator* alloc)
        : alloc(alloc)
    {
        _max = _len = 0;
        _data = NULL;
    }

    bool contains(T a) {
        for (unsigned n = 0; n < _len; ++n) {
            if (_data[n] == a)
                return true;
        }
        return false;
    }

    void add(T a) {
        ensure(_len + 1);
        JS_ASSERT(_len <= _max);
        _data[_len++] = a;
    }

    void add(T* chunk, unsigned size) {
        ensure(_len + size);
        JS_ASSERT(_len <= _max);
        memcpy(&_data[_len], chunk, size * sizeof(T));
        _len += size;
    }

    void addUnique(T a) {
        if (!contains(a))
            add(a);
    }

    void setLength(unsigned len) {
        ensure(len + 1);
        _len = len;
    }

    T& get(unsigned i) {
        JS_ASSERT(i < length());
        return _data[i];
    }

    const T& get(unsigned i) const {
        JS_ASSERT(i < length());
        return _data[i];
    }

    T& operator [](unsigned i) {
        return get(i);
    }

    const T& operator [](unsigned i) const {
        return get(i);
    }

    unsigned length() const {
        return _len;
    }

    T* data() const {
        return _data;
    }

    int offsetOf(T slot) {
        for (unsigned n = 0; n < _len; ++n)
            if (_data[n] == slot)
                return int(n);
        return -1;
    }
};
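/*
 * Usage sketch (illustrative only, not code from this header): the tracer's
 * containers are built on this class, e.g.
 *
 *   Queue<uint16> slots(allocp);   // allocp is a nanojit::Allocator*
 *   slots.add(5);
 *   slots.addUnique(5);            // no duplicate is appended
 *   JS_ASSERT(slots.length() == 1 && slots.offsetOf(5) == 0);
 */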
/*
 * Tracker is used to keep track of values being manipulated by the interpreter
 * during trace recording. It maps opaque, 4-byte aligned addresses to LIns
 * pointers. To do this efficiently, we observe that the addresses of jsvals
 * living in the interpreter tend to be aggregated close to each other -
 * usually on the same page (where a tracker page doesn't have to be the same
 * size as the OS page size, but it's typically similar). The Tracker
 * consists of a linked-list of structures representing a memory page, which
 * are created on-demand as memory locations are used.
 *
 * For every address, first we split it into two parts: upper bits which
 * represent the "base", and lower bits which represent an offset against the
 * base. For the offset, we then right-shift it by two because the bottom two
 * bits of a 4-byte aligned address are always zero. The mapping then
 * becomes:
 *
 *   page = page in pagelist such that Base(address) == page->base,
 *   page->map[Offset(address)]
 */
#define TRACKER_PAGE_SZB        4096
#define TRACKER_PAGE_ENTRIES    (TRACKER_PAGE_SZB >> 2)    // each slot is 4 bytes
#define TRACKER_PAGE_MASK       jsuword(TRACKER_PAGE_SZB - 1)
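/*
 * Illustrative sketch (not from the original header): with the constants
 * above, the base/offset split described in the comment amounts to
 *
 *   jsuword base   = jsuword(v) & ~TRACKER_PAGE_MASK;        // upper bits
 *   jsuword offset = (jsuword(v) & TRACKER_PAGE_MASK) >> 2;  // lower bits / 4
 *
 * so each TrackerPage covers TRACKER_PAGE_SZB bytes of interpreter memory and
 * maps them onto TRACKER_PAGE_ENTRIES LIns* slots.
 */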
class Tracker {
    struct TrackerPage {
        struct TrackerPage* next;
        jsuword             base;
        nanojit::LIns*      map[TRACKER_PAGE_ENTRIES];
    };
    struct TrackerPage* pagelist;

    jsuword             getTrackerPageBase(const void* v) const;
    jsuword             getTrackerPageOffset(const void* v) const;
    struct TrackerPage* findTrackerPage(const void* v) const;
    struct TrackerPage* addTrackerPage(const void* v);

  public:
    bool            has(const void* v) const;
    nanojit::LIns*  get(const void* v) const;
    void            set(const void* v, nanojit::LIns* ins);
};
class VMFragment : public nanojit::Fragment
{
  public:
    VMFragment(const void* _ip verbose_only(, uint32_t profFragID))
      : Fragment(_ip verbose_only(, profFragID))
    {}

    /*
     * If this is anchored off a TreeFragment, this points to that tree fragment.
     * Otherwise, it is |this|.
     */
    TreeFragment* root;

    TreeFragment* toTreeFragment();
};
#ifdef NJ_NO_VARIADIC_MACROS

#define debug_only_stmt(action)            /* */
static void debug_only_printf(int mask, const char *fmt, ...) JS_BEGIN_MACRO JS_END_MACRO
#define debug_only_print0(mask, str)       JS_BEGIN_MACRO JS_END_MACRO

#elif defined(JS_JIT_SPEW)

// Top level logging controller object.
extern nanojit::LogControl LogController;

// Top level profiling hook, needed to harvest profile info from Fragments
// whose logical lifetime is about to finish
extern void FragProfiling_FragFinalizer(nanojit::Fragment* f, TraceMonitor*);

#define debug_only_stmt(stmt) \
    stmt

#define debug_only_printf(mask, fmt, ...)                                     \
    JS_BEGIN_MACRO                                                            \
        if ((LogController.lcbits & (mask)) > 0) {                            \
            LogController.printf(fmt, __VA_ARGS__);                           \
        }                                                                     \
    JS_END_MACRO

#define debug_only_print0(mask, str)                                          \
    JS_BEGIN_MACRO                                                            \
        if ((LogController.lcbits & (mask)) > 0) {                            \
            LogController.printf("%s", str);                                  \
        }                                                                     \
    JS_END_MACRO

#else

#define debug_only_stmt(action) /* */
#define debug_only_printf(mask, fmt, ...) JS_BEGIN_MACRO JS_END_MACRO
#define debug_only_print0(mask, str) JS_BEGIN_MACRO JS_END_MACRO

#endif
/*
 * The oracle keeps track of hit counts for program counter locations, as
 * well as slots that should not be demoted to int because we know them to
 * overflow or they result in type-unstable traces. We are using simple
 * hash tables. Collisions lead to loss of optimization (demotable slots
 * are not demoted, etc.) but have no correctness implications.
 */
#define ORACLE_SIZE 4096

class Oracle {
    avmplus::BitSet _stackDontDemote;
    avmplus::BitSet _globalDontDemote;
    avmplus::BitSet _pcDontDemote;
    avmplus::BitSet _pcSlowZeroTest;

  public:
    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc);
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc) const;
    void markInstructionUndemotable(jsbytecode* pc);
    bool isInstructionUndemotable(jsbytecode* pc) const;
    void markInstructionSlowZeroTest(jsbytecode* pc);
    bool isInstructionSlowZeroTest(jsbytecode* pc) const;

    void clearDemotability();
};
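/*
 * Illustrative sketch (an assumption, not taken from this header): the
 * mark/is queries above behave like a lossy hash set over ORACLE_SIZE bits;
 * for example, markInstructionUndemotable(pc) conceptually performs
 *
 *   _pcDontDemote.set(hashPointer(pc) % ORACLE_SIZE);   // hashPointer is hypothetical
 *
 * so two bytecodes that collide merely lose an optimization opportunity and
 * never produce incorrect code.
 */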
typedef Queue<uint16> SlotList;
class TypeMap : public Queue<JSValueType> {
    Oracle *oracle;
  public:
    TypeMap(nanojit::Allocator* alloc, Oracle *oracle)
      : Queue<JSValueType>(alloc),
        oracle(oracle)
    {}

    void set(unsigned stackSlots, unsigned ngslots,
             const JSValueType* stackTypeMap, const JSValueType* globalTypeMap);
    JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth,
                                        bool speculate);
    JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                     unsigned stackSlots, bool speculate);
    bool matches(TypeMap& other) const;
    void fromRaw(JSValueType* other, unsigned numSlots);
};
#define JS_TM_EXITCODES(_)                                                    \
    /*                                                                        \
     * An exit at a possible branch-point in the trace at which to attach a  \
     * future secondary trace. Therefore the recorder must generate different\
     * code to handle the other outcome of the branch condition from the     \
     * primary trace's outcome.                                              \
     */                                                                       \
    _(BRANCH)                                                                 \
    /*                                                                        \
     * Exit at a tableswitch via a numbered case.                            \
     */                                                                       \
    _(CASE)                                                                   \
    /*                                                                        \
     * Exit at a tableswitch via the default case.                           \
     */                                                                       \
    _(DEFAULT)                                                                \
    /*                                                                        \
     * An exit from a trace because a condition relied upon at recording time\
     * no longer holds, where the alternate path of execution is so rare or  \
     * difficult to address in native code that it is not traced at all, e.g.\
     * negative array index accesses, which differ from positive indexes in  \
     * that they require a string-based property lookup rather than a simple \
     * memory access.                                                        \
     */                                                                       \
    _(MISMATCH)                                                               \
    /*                                                                        \
     * A specialization of MISMATCH_EXIT to handle allocation failures.      \
     */                                                                       \
    _(OOM)

enum ExitType {
    #define MAKE_EXIT_CODE(x) x##_EXIT,
    JS_TM_EXITCODES(MAKE_EXIT_CODE)
    #undef MAKE_EXIT_CODE
};
struct VMSideExit : public nanojit::SideExit
{
    uint32 numGlobalSlots;
    uint32 numStackSlots;
    uint32 numStackSlotsBelowCurrentFrame;

    inline JSValueType* stackTypeMap() {
        return (JSValueType*)(this + 1);
    }

    inline JSValueType& stackType(unsigned i) {
        JS_ASSERT(i < numStackSlots);
        return stackTypeMap()[i];
    }

    inline JSValueType* globalTypeMap() {
        return (JSValueType*)(this + 1) + this->numStackSlots;
    }

    inline JSValueType* fullTypeMap() {
        return stackTypeMap();
    }

    inline VMFragment* fromFrag() {
        return (VMFragment*)from;
    }

    inline TreeFragment* root() {
        return fromFrag()->root;
    }
};
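/*
 * Reading aid (not part of the original header): the accessors above imply
 * that the type map is stored directly after the VMSideExit object itself -
 * first numStackSlots entries for stack slots, then numGlobalSlots entries
 * for global slots - so fullTypeMap() simply returns the start of both.
 */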
class VMAllocator : public nanojit::Allocator
{
  public:
    VMAllocator(char* reserve, size_t reserveSize)
      : mOutOfMemory(false), mSize(0), mReserve(reserve),
        mReserveCurr(uintptr_t(reserve)), mReserveLimit(uintptr_t(reserve + reserveSize))
    {}

    struct Mark
    {
        VMAllocator&                vma;
        bool                        committed;
        nanojit::Allocator::Chunk*  saved_chunk;
        char*                       saved_top;
        char*                       saved_limit;
        size_t                      saved_size;

        Mark(VMAllocator& vma) :
            vma(vma),
            committed(false),
            saved_chunk(vma.current_chunk),
            saved_top(vma.current_top),
            saved_limit(vma.current_limit),
            saved_size(vma.mSize)
        {}

        ~Mark()
        {
            if (!committed)
                vma.rewind(*this);
        }

        void commit() { committed = true; }
    };

    void rewind(const Mark& m) {
        while (current_chunk != m.saved_chunk) {
            Chunk *prev = current_chunk->prev;
            freeChunk(current_chunk);
            current_chunk = prev;
        }
        current_top = m.saved_top;
        current_limit = m.saved_limit;
        mSize = m.saved_size;
        memset(current_top, 0, current_limit - current_top);
    }

    bool        mOutOfMemory;
    size_t      mSize;

    /* See nanojit::Allocator::allocChunk() for details on these. */
    char*       mReserve;
    uintptr_t   mReserveCurr;
    uintptr_t   mReserveLimit;
};
struct FrameInfo {
    JSObject*       block;      // caller block chain head
    jsbytecode*     pc;         // caller fp->regs->pc
    jsbytecode*     imacpc;     // caller fp->imacpc
    uint32          spdist;     // distance from fp->slots to fp->regs->sp at JSOP_CALL

    /*
     * Bit 16 (0x10000) is a flag that is set if constructing (called through new).
     * Bits 0-15 are the actual argument count. This may be less than fun->nargs.
     * NB: This is argc for the callee, not the caller.
     */
    uint32          argc;

    /*
     * Number of stack slots in the caller, not counting slots pushed when
     * invoking the callee. That is, slots after JSOP_CALL completes but
     * without the return value. This is also equal to the number of slots
     * between fp->prev->argv[-2] (calleR fp->callee) and fp->argv[-2]
     * (calleE fp->callee).
     */
    uint32          callerHeight;

    /* argc of the caller */
    uint32          callerArgc;

    // Safer accessors for argc.
    enum { CONSTRUCTING_FLAG = 0x10000 };
    void set_argc(uint16 argc, bool constructing) {
        this->argc = uint32(argc) | (constructing ? CONSTRUCTING_FLAG : 0);
    }
    uint16 get_argc() const { return uint16(argc & ~CONSTRUCTING_FLAG); }
    bool is_constructing() const { return (argc & CONSTRUCTING_FLAG) != 0; }

    // The typemap just before the callee is called.
    JSValueType* get_typemap() { return (JSValueType*) (this+1); }
    const JSValueType* get_typemap() const { return (JSValueType*) (this+1); }
};
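/*
 * Usage sketch for the argc accessors above (illustrative only): a call site
 * recorded with three arguments through |new| behaves as
 *
 *   fi->set_argc(3, true);            // stores 3 | CONSTRUCTING_FLAG
 *   JS_ASSERT(fi->get_argc() == 3);
 *   JS_ASSERT(fi->is_constructing());
 */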
struct UnstableExit
{
    VMFragment*   fragment;
    VMSideExit*   exit;
    UnstableExit* next;
};
struct LinkableFragment : public VMFragment
{
    LinkableFragment(const void* _ip, nanojit::Allocator* alloc, Oracle *oracle
                     verbose_only(, uint32_t profFragID))
      : VMFragment(_ip verbose_only(, profFragID)), typeMap(alloc, oracle), nStackTypes(0)
    { }

    TypeMap                 typeMap;
    unsigned                nStackTypes;
    unsigned                spOffsetAtEntry;
    SlotList*               globalSlots;
};
/*
 * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments
 * pushed for the innermost JS frame. This is required as part of the fragment
 * key because the fragment will write those arguments back to the interpreter
 * stack when it exits, using its typemap, which implicitly incorporates a
 * given value of argc. Without this feature, a fragment could be called as an
 * inner tree with two different values of argc, and entry type checking or
 * exit frame synthesis could crash.
 */
struct TreeFragment : public LinkableFragment
{
    TreeFragment(const void* _ip, nanojit::Allocator* alloc, Oracle *oracle, JSObject* _globalObj,
                 uint32 _globalShape, uint32 _argc verbose_only(, uint32_t profFragID)):
        LinkableFragment(_ip, alloc, oracle verbose_only(, profFragID)),
        globalObj(_globalObj),
        globalShape(_globalShape),
        argc(_argc),
        dependentTrees(alloc),
        linkedTrees(alloc),
        sideExits(alloc),
        gcthings(alloc),
        shapes(alloc)
    { }

    JSObject*               globalObj;
    uint32                  globalShape;
    uint32                  argc;
    /* Dependent trees must be trashed if this tree dies, and updated on missing global types */
    Queue<TreeFragment*>    dependentTrees;
    /* Linked trees must be updated on missing global types, but are not dependent */
    Queue<TreeFragment*>    linkedTrees;
    const char*             treeFileName;
    uintN                   treeLineNumber;
    UnstableExit*           unstableExits;
    Queue<VMSideExit*>      sideExits;
    ptrdiff_t               nativeStackBase;
    unsigned                maxCallDepth;
    /* All embedded GC things are registered here so the GC can scan them. */
    Queue<Value>            gcthings;
    Queue<const js::Shape*> shapes;
    unsigned                maxNativeStackSlots;
    /* Gives the number of times we have entered this trace. */
    /* Gives the total number of iterations executed by the trace (up to a limit). */

    inline unsigned nGlobalTypes() {
        return typeMap.length() - nStackTypes;
    }
    inline JSValueType* globalTypeMap() {
        return typeMap.data() + nStackTypes;
    }
    inline JSValueType* stackTypeMap() {
        return typeMap.data();
    }

    JS_REQUIRES_STACK void initialize(JSContext* cx, SlotList *globalSlots, bool speculate);
    UnstableExit* removeUnstableExit(VMSideExit* exit);
};

inline TreeFragment*
VMFragment::toTreeFragment()
{
    JS_ASSERT(root == this);
    return static_cast<TreeFragment*>(this);
}
enum MonitorResult {
    MONITOR_RECORDING,
    MONITOR_NOT_RECORDING,
    MONITOR_ERROR
};
const uintN PROFILE_MAX_INNER_LOOPS = 8;
const uintN PROFILE_MAX_STACK = 6;
/*
 * A loop profile keeps track of the instruction mix of a hot loop. We use this
 * information to predict whether tracing would be beneficial for the loop.
 */
class LoopProfile
{
  public:
    /* Instructions are divided into a few categories. */
    enum OpKind {
        OP_FLOAT,       // Floating point arithmetic
        OP_INT,         // Integer arithmetic
        OP_BIT,         // Bit operations
        OP_EVAL,        // Calls to eval()
        OP_CALL,        // JSOP_CALL instructions
        OP_FWDJUMP,     // Jumps with positive delta
        OP_NEW,         // JSOP_NEW instructions
        OP_RECURSIVE,   // Recursive calls
        OP_ARRAY_READ,  // Reads from dense arrays
        OP_TYPED_ARRAY, // Accesses to typed arrays
        OP_LIMIT
    };
    /* The TraceMonitor for which we're profiling. */
    TraceMonitor *traceMonitor;

    /* The script in which the loop header lives. */
    JSScript *entryScript;

    /* The stack frame where we started profiling. Only valid while profiling! */
    JSStackFrame *entryfp;

    /* The bytecode locations of the loop header and the back edge. */
    jsbytecode *top, *bottom;

    /* Number of times we have seen this loop executed; used to decide when to profile. */

    /* Whether we have run a complete profile of the loop. */

    /* Sometimes we can't decide in one profile run whether to trace, so we set undecided. */

    /* If we have profiled the loop, this saves the decision of whether to trace it. */

    /* Memoized value of isCompilationUnprofitable. */
    /*
     * Sometimes loops are not good tracing opportunities, but they are nested inside
     * loops that we want to trace. In that case, we set their traceOK flag to true,
     * but we set execOK to false. That way, the loop is traced so that it can be
     * integrated into the outer trace. But we never execute the trace on its own.
     */

    /* Instruction mix for the loop and total number of instructions. */
    uintN allOps[OP_LIMIT];

    /* Instruction mix and total for the loop, excluding nested inner loops. */
    uintN selfOps[OP_LIMIT];
    /*
     * A prediction of the number of instructions we would have to compile
     * for the loop. This takes into account the fact that a branch may cause us to
     * compile every instruction after it twice. Polymorphic calls are
     * treated as n-way branches.
     */
    double numSelfOpsMult;

    /*
     * This keeps track of the number of times that every succeeding instruction
     * in the trace will have to be compiled. Every time we hit a branch, we
     * double this number. Polymorphic calls multiply it by n (for n-way
     * polymorphism).
     */
    double branchMultiplier;
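    /*
     * Worked example (illustrative, not from the original header): starting
     * from branchMultiplier == 1, a loop body with two branches followed by a
     * 3-way polymorphic call ends up with branchMultiplier == 2 * 2 * 3 = 12,
     * so every instruction profiled after that point adds 12 to
     * numSelfOpsMult.
     */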
    /* Set to true if the loop is short (i.e., has fewer than 8 iterations). */

    /* Set to true if the loop may be short (has few iterations at profiling time). */

    /*
     * When we hit a nested loop while profiling, we record where it occurs
     * and how many iterations we execute it.
     */
    struct InnerLoop {
        JSStackFrame *entryfp;
        jsbytecode *top, *bottom;
        uintN iters;

        InnerLoop() {}
        InnerLoop(JSStackFrame *entryfp, jsbytecode *top, jsbytecode *bottom)
            : entryfp(entryfp), top(top), bottom(bottom), iters(0) {}
    };
    /* These two variables track all the inner loops seen while profiling (up to a limit). */
    InnerLoop innerLoops[PROFILE_MAX_INNER_LOOPS];

    /*
     * These two variables track the loops that we are currently nested
     * inside while profiling. Loops get popped off here when they exit.
     */
    InnerLoop loopStack[PROFILE_MAX_INNER_LOOPS];
    uintN loopStackDepth;

    /*
     * These fields keep track of values on the JS stack. If the stack grows larger
     * than PROFILE_MAX_STACK, we continue to track sp, but we return conservative results.
     */
    struct StackValue {
        bool isConst;
        bool hasValue;
        int value;

        StackValue() : isConst(false), hasValue(false) {}
        StackValue(bool isConst) : isConst(isConst), hasValue(false) {}
        StackValue(bool isConst, int value) : isConst(isConst), hasValue(true), value(value) {}
    };
    StackValue stack[PROFILE_MAX_STACK];
    uintN sp;

    inline void stackClear() { sp = 0; }

    inline void stackPush(const StackValue &v) {
        if (sp < PROFILE_MAX_STACK)
            stack[sp++] = v;
    }

    inline void stackPop() { if (sp > 0) sp--; }

    inline StackValue stackAt(int pos) {
        if (pos >= 0 && uintN(pos) < PROFILE_MAX_STACK)
            return stack[pos];
        return StackValue(false);
    }
    LoopProfile(TraceMonitor *tm, JSStackFrame *entryfp, jsbytecode *top, jsbytecode *bottom);

    /* These two functions track the instruction mix. */
    inline void increment(OpKind kind)
    {
        allOps[kind]++;
        if (loopStackDepth == 0)
            selfOps[kind]++;
    }

    inline uintN count(OpKind kind) { return allOps[kind]; }

    /* Called for every back edge being profiled. */
    MonitorResult profileLoopEdge(JSContext* cx, uintN& inlineCallCount);

    /* Called for every instruction being profiled. */
    ProfileAction profileOperation(JSContext *cx, JSOp op);

    /* Once a loop's profile is done, these decide whether it should be traced. */
    bool isCompilationExpensive(JSContext *cx, uintN depth);
    bool isCompilationUnprofitable(JSContext *cx, uintN goodOps);
    void decide(JSContext *cx);

    void stopProfiling(JSContext *cx);
};
/*
 * BUILTIN_NO_FIXUP_NEEDED indicates that after the initial LeaveTree of a deep
 * bail, the builtin call needs no further fixup when the trace exits and calls
 * LeaveTree the second time.
 */
typedef enum BuiltinStatus {
    BUILTIN_ERROR = 1,
    BUILTIN_NO_FIXUP_NEEDED = 2
} BuiltinStatus;

static JS_INLINE void
SetBuiltinError(TraceMonitor *tm)
{
    tm->tracerState->builtinStatus |= BUILTIN_ERROR;
}

static JS_INLINE bool
WasBuiltinSuccessful(TraceMonitor *tm)
{
    return tm->tracerState->builtinStatus == 0;
}
#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL
/* #define DEBUG_RECORDING_STATUS_NOT_BOOL to detect misuses of RecordingStatus */
struct RecordingStatus {
    int code;
    bool operator==(RecordingStatus &s) { return this->code == s.code; };
    bool operator!=(RecordingStatus &s) { return this->code != s.code; };
};
enum RecordingStatusCodes {
    RECORD_ERROR_code     = 0,
    RECORD_STOP_code      = 1,

    RECORD_CONTINUE_code  = 3,
    RECORD_IMACRO_code    = 4
};
RecordingStatus RECORD_CONTINUE = { RECORD_CONTINUE_code };
RecordingStatus RECORD_STOP     = { RECORD_STOP_code };
RecordingStatus RECORD_IMACRO   = { RECORD_IMACRO_code };
RecordingStatus RECORD_ERROR    = { RECORD_ERROR_code };

struct AbortableRecordingStatus {
    int code;
    bool operator==(AbortableRecordingStatus &s) { return this->code == s.code; };
    bool operator!=(AbortableRecordingStatus &s) { return this->code != s.code; };
};
enum AbortableRecordingStatusCodes {
    ARECORD_ERROR_code          = 0,
    ARECORD_STOP_code           = 1,
    ARECORD_ABORTED_code        = 2,
    ARECORD_CONTINUE_code       = 3,
    ARECORD_IMACRO_code         = 4,
    ARECORD_IMACRO_ABORTED_code = 5,
    ARECORD_COMPLETED_code      = 6
};
AbortableRecordingStatus ARECORD_ERROR          = { ARECORD_ERROR_code };
AbortableRecordingStatus ARECORD_STOP           = { ARECORD_STOP_code };
AbortableRecordingStatus ARECORD_CONTINUE       = { ARECORD_CONTINUE_code };
AbortableRecordingStatus ARECORD_IMACRO         = { ARECORD_IMACRO_code };
AbortableRecordingStatus ARECORD_IMACRO_ABORTED = { ARECORD_IMACRO_ABORTED_code };
AbortableRecordingStatus ARECORD_ABORTED        = { ARECORD_ABORTED_code };
AbortableRecordingStatus ARECORD_COMPLETED      = { ARECORD_COMPLETED_code };

static inline AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    AbortableRecordingStatus ars = { rs.code };
    return ars;
}

static inline AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}

static inline bool
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
    return ars == ARECORD_ERROR || ars == ARECORD_STOP;
}
#else
/*
 * Normally, during recording, when the recorder cannot continue, it returns
 * ARECORD_STOP to indicate that recording should be aborted by the top-level
 * recording function. However, if the recorder reenters the interpreter (e.g.,
 * when executing an inner loop), there will be an immediate abort. This
 * condition must be carefully detected and propagated out of all nested
 * recorder calls lest the now-invalid TraceRecorder object be accessed
 * accidentally. This condition is indicated by the ARECORD_ABORTED value.
 *
 * The AbortableRecordingStatus enumeration represents the general set of
 * possible results of calling a recorder function. Functions that cannot
 * possibly return ARECORD_ABORTED may statically guarantee this to the caller
 * using the RecordingStatus enumeration. Ideally, C++ would allow subtyping
 * of enumerations, but it doesn't. To simulate subtype conversion manually,
 * code should call InjectStatus to inject a value of the restricted set into a
 * value of the general set.
 */
enum RecordingStatus {
    RECORD_STOP       = 0,  // Recording should be aborted at the top-level
                            // call to the recorder.
    RECORD_ERROR      = 1,  // Recording should be aborted at the top-level
                            // call to the recorder and the interpreter should
                            // goto error.
    RECORD_CONTINUE   = 2,  // Continue recording.
    RECORD_IMACRO     = 3   // Entered imacro; continue recording.
                            // Only JSOP_IS_IMACOP opcodes may return this.
};

enum AbortableRecordingStatus {
    ARECORD_STOP           = 0,  // see RECORD_STOP
    ARECORD_ERROR          = 1,  // Recording may or may not have been aborted.
                                 // Recording should be aborted at the top-level
                                 // if it has not already been and the interpreter
                                 // should goto error.
    ARECORD_CONTINUE       = 2,  // see RECORD_CONTINUE
    ARECORD_IMACRO         = 3,  // see RECORD_IMACRO
    ARECORD_IMACRO_ABORTED = 4,  // see comment in TR::monitorRecording.
    ARECORD_ABORTED        = 5,  // Recording has already been aborted; the
                                 // interpreter should continue executing
    ARECORD_COMPLETED      = 6   // Recording completed successfully, the
                                 // trace recorder has been deleted
};

static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    return static_cast<AbortableRecordingStatus>(rs);
}

static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}

/*
 * Return whether the recording status requires the current recording session
 * to be deleted. ERROR means the recording session should be deleted if it
 * hasn't already. ABORTED and COMPLETED indicate the recording session is
 * already deleted, so they return 'false'.
 */
static JS_ALWAYS_INLINE bool
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
    return ars <= ARECORD_ERROR;
}
#endif
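/*
 * Typical use (an illustrative sketch; someRecorderHelper is hypothetical): a
 * helper that can only stop or continue returns the narrow RecordingStatus,
 * and callers that must report the wider AbortableRecordingStatus widen it
 * explicitly:
 *
 *   RecordingStatus rs = someRecorderHelper();
 *   if (rs != RECORD_CONTINUE)
 *       return InjectStatus(rs);
 */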
/* Results of trying to compare two typemaps together */
enum TypeConsensus
{
    TypeConsensus_Okay,         /* Two typemaps are compatible */
    TypeConsensus_Undemotes,    /* Not compatible now, but would be with pending undemotes. */
    TypeConsensus_Bad           /* Typemaps are not compatible */
};
enum TracePointAction {
    TPA_Nothing,
    TPA_RanStuff,
    TPA_Recorded,
    TPA_Error
};
typedef HashMap<nanojit::LIns*, JSObject*> GuardedShapeTable;
#ifdef DEBUG
# define AbortRecording(cx, reason) AbortRecordingImpl(cx, reason)
#else
# define AbortRecording(cx, reason) AbortRecordingImpl(cx)
#endif

void
AbortProfiling(JSContext *cx);
class TraceRecorder
{
    /*************************************************************** Recording session constants */

    /* The context in which recording started. */
    JSContext* const                cx;

    /* Cached value of JS_TRACE_MONITOR(cx). */
    TraceMonitor* const             traceMonitor;

    /* Cached oracle keeps track of hit counts for program counter locations. */
    Oracle*                         oracle;

    /* The Fragment being recorded by this recording session. */
    VMFragment* const               fragment;

    /* The root fragment representing the tree. */
    TreeFragment* const             tree;

    /* The global object from the start of recording until now. */
    JSObject* const                 globalObj;

    /* If non-null, the script of the outer loop aborted to start recording this loop. */
    JSScript* const                 outerScript;

    /* If non-null, the pc of the outer loop aborted to start recording this loop. */
    jsbytecode* const               outerPC;

    /* If |outerPC|, the argc to use when looking up |outerPC| in the fragments table. */
    uint32 const                    outerArgc;

    /* If non-null, the side exit from which we are growing. */
    VMSideExit* const               anchor;

    /* Instructions yielding the corresponding trace-const members of TracerState. */
    nanojit::LIns* const            cx_ins;
    nanojit::LIns* const            eos_ins;
    nanojit::LIns* const            eor_ins;
    nanojit::LIns* const            loopLabel;

    /* Lazy slot import state. */
    unsigned                        importStackSlots;
    unsigned                        importGlobalSlots;
    TypeMap                         importTypeMap;

    /*
     * The LirBuffer used to supply memory to our LirWriter pipeline. Also contains the most recent
     * instruction for {sp, rp, state}. Also contains names for debug JIT spew. Should be split.
     */
    nanojit::LirBuffer* const       lirbuf;

    /*
     * Remembers traceAlloc state before recording started; automatically rewinds when mark is
     * destroyed on a failed compilation.
     */
    VMAllocator::Mark               mark;

    /* Remembers the number of sideExits in treeInfo before recording started. */
    const unsigned                  numSideExitsBefore;

    /*********************************************************** Recording session mutable state */

    /* Maps interpreter stack values to the instruction generating that value. */
    Tracker                         tracker;

    /* Maps interpreter stack values to the instruction writing back to the native stack. */
    Tracker                         nativeFrameTracker;

    /* The start of the global object's slots we assume for the trackers. */
    Value*                          global_slots;

    /* The number of interpreted calls entered (and not yet left) since recording began. */

    /* The current atom table, mirroring the interpreter loop's variable of the same name. */

    /* An instruction yielding the current script's strict mode code flag. */
    nanojit::LIns*                  strictModeCode_ins;

    /* FIXME: Dead, but soon to be used for something or other. */
    Queue<jsbytecode*>              cfgMerges;

    /* Indicates whether the current tree should be trashed when the recording session ends. */

    /* A list of trees to trash at the end of the recording session. */
    Queue<TreeFragment*>            whichTreesToTrash;

    /* The set of objects whose shapes already have been guarded. */
    GuardedShapeTable               guardedShapeTable;

    /* Current initializer depth, and whether any of the initializers are unoptimized NEWINIT. */

    /*
     * If we are expecting a record_AddProperty callback for this instruction,
     * the shape of the object before adding the data property. Else NULL.
     */
    const js::Shape*                addPropShapeBefore;

    /***************************************** Temporal state hoisted into the recording session */

    /* Carry the return value from a STOP/RETURN to the subsequent record_LeaveFrame. */
    nanojit::LIns*                  rval_ins;

    /* Carry the return value from a native call to the record_NativeCallComplete. */
    nanojit::LIns*                  native_rval_ins;

    /* Carry the return value of js_CreateThis to record_NativeCallComplete. */
    nanojit::LIns*                  newobj_ins;

    /* Carry the JSSpecializedNative used to generate a call to record_NativeCallComplete. */
    JSSpecializedNative*            pendingSpecializedNative;

    /* Carry whether this is a jsval on the native stack from finishGetProp to monitorRecording. */
    Value*                          pendingUnboxSlot;

    /* Carry a guard condition to the beginning of the next monitorRecording. */
    nanojit::LIns*                  pendingGuardCondition;

    /* See AbortRecordingIfUnexpectedGlobalWrite. */
    js::Vector<unsigned>            pendingGlobalSlotsToSet;

    /* Carry whether we have an always-exit from emitIf to checkTraceEnd. */

    /* Temporary JSSpecializedNative used to describe non-specialized fast natives. */
    JSSpecializedNative             generatedSpecializedNative;

    /* Temporary JSValueType array used to construct temporary typemaps. */
    js::Vector<JSValueType, 256>    tempTypeMap;

    /* Used to generate LIR. Has a short name because it's used a lot. */
    tjit::Writer                    w;
    /************************************************************* 10 bajillion member functions */

    /*
     * These would be in Writer if they didn't modify TraceRecorder state.
     * They are invoked via the macros below that make them look like they are
     * part of Writer (hence the "w_" prefix, which looks like "w.").
     */
    nanojit::LIns* w_immpObjGC(JSObject* obj);
    nanojit::LIns* w_immpFunGC(JSFunction* fun);
    nanojit::LIns* w_immpStrGC(JSString* str);
    nanojit::LIns* w_immpShapeGC(const js::Shape* shape);
    nanojit::LIns* w_immpIdGC(jsid id);

    #define immpObjGC(obj)        name(w_immpObjGC(obj), #obj)
    #define immpFunGC(fun)        name(w_immpFunGC(fun), #fun)
    #define immpStrGC(str)        name(w_immpStrGC(str), #str)
    #define immpAtomGC(atom)      name(w_immpStrGC(ATOM_TO_STRING(atom)), "ATOM_TO_STRING(" #atom ")")
    #define immpShapeGC(shape)    name(w_immpShapeGC(shape), #shape)
    #define immpIdGC(id)          name(w_immpIdGC(id), #id)
    /*
     * Examines current interpreter state to record information suitable for returning to the
     * interpreter through a side exit of the given type.
     */
    JS_REQUIRES_STACK VMSideExit* snapshot(ExitType exitType);

    /*
     * Creates a separate but identical copy of the given side exit, allowing the guards associated
     * with each to be entirely separate even after subsequent patching.
     */
    JS_REQUIRES_STACK VMSideExit* copy(VMSideExit* exit);

    /*
     * Creates an instruction whose payload is a GuardRecord for the given exit. The instruction
     * is suitable for use as the final argument of a single call to LirBuffer::insGuard; do not
     * reuse the returned value.
     */
    JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);
    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);

    JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot,
                                                         const void* pc);

    JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
                                                       Queue<unsigned>& undemotes);

    JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);

    JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, JSValueType* typeMap);

    bool isVoidPtrGlobal(const void* p) const;
    bool isGlobal(const Value* p) const;
    ptrdiff_t nativeGlobalSlot(const Value* p) const;
    ptrdiff_t nativeGlobalOffset(const Value* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackOffsetImpl(const void* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(const Value* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackSlotImpl(const void* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativeStackSlot(const Value* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativespOffsetImpl(const void* p) const;
    JS_REQUIRES_STACK ptrdiff_t nativespOffset(const Value* p) const;
    JS_REQUIRES_STACK void importImpl(tjit::Address addr, const void* p, JSValueType t,
                                      const char *prefix, uintN index, JSStackFrame *fp);
    JS_REQUIRES_STACK void import(tjit::Address addr, const Value* p, JSValueType t,
                                  const char *prefix, uintN index, JSStackFrame *fp);
    JS_REQUIRES_STACK void import(TreeFragment* tree, nanojit::LIns* sp, unsigned stackSlots,
                                  unsigned callDepth, unsigned ngslots, JSValueType* typeMap);
    void trackNativeStackUse(unsigned slots);

    JS_REQUIRES_STACK bool isValidSlot(JSObject *obj, const js::Shape* shape);
    JS_REQUIRES_STACK bool lazilyImportGlobalSlot(unsigned slot);
    JS_REQUIRES_STACK void importGlobalSlot(unsigned slot);

    void ensureCond(nanojit::LIns** ins, bool* cond);

    JS_REQUIRES_STACK RecordingStatus guard(bool expected, nanojit::LIns* cond, ExitType exitType,
                                            bool abortIfAlwaysExits = false);
    JS_REQUIRES_STACK RecordingStatus guard(bool expected, nanojit::LIns* cond, VMSideExit* exit,
                                            bool abortIfAlwaysExits = false);
    JS_REQUIRES_STACK nanojit::LIns* guard_xov(nanojit::LOpcode op, nanojit::LIns* d0,
                                               nanojit::LIns* d1, VMSideExit* exit);

    nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset,
                             bool shouldDemoteToInt32);

    bool isValidFrameObjPtr(void *obj);

    void assertInsideLoop();

    JS_REQUIRES_STACK void setImpl(void* p, nanojit::LIns* l, bool shouldDemoteToInt32 = true);
    JS_REQUIRES_STACK void set(Value* p, nanojit::LIns* l, bool shouldDemoteToInt32 = true);
    JS_REQUIRES_STACK void setFrameObjPtr(void* p, nanojit::LIns* l,
                                          bool shouldDemoteToInt32 = true);
    nanojit::LIns* getFromTrackerImpl(const void *p);
    nanojit::LIns* getFromTracker(const Value* p);
    JS_REQUIRES_STACK nanojit::LIns* getImpl(const void* p);
    JS_REQUIRES_STACK nanojit::LIns* get(const Value* p);
    JS_REQUIRES_STACK nanojit::LIns* getFrameObjPtr(void* p);
    JS_REQUIRES_STACK nanojit::LIns* attemptImport(const Value* p);
    JS_REQUIRES_STACK nanojit::LIns* addr(Value* p);

    JS_REQUIRES_STACK bool knownImpl(const void* p);
    JS_REQUIRES_STACK bool known(const Value* p);
    JS_REQUIRES_STACK bool known(JSObject** p);

    /*
     * The slots of the global object are sometimes reallocated by the
     * interpreter. This function checks for that condition and re-maps the
     * entries of the tracker accordingly.
     */
    JS_REQUIRES_STACK void checkForGlobalObjectReallocation() {
        if (global_slots != globalObj->getSlots())
            checkForGlobalObjectReallocationHelper();
    }
    JS_REQUIRES_STACK void checkForGlobalObjectReallocationHelper();

    JS_REQUIRES_STACK TypeConsensus selfTypeStability(SlotMap& smap);
    JS_REQUIRES_STACK TypeConsensus peerTypeStability(SlotMap& smap, const void* ip,
                                                      TreeFragment** peer);

    JS_REQUIRES_STACK Value& argval(unsigned n) const;
    JS_REQUIRES_STACK Value& varval(unsigned n) const;
    JS_REQUIRES_STACK Value& stackval(int n) const;

    JS_REQUIRES_STACK void updateAtoms();
    JS_REQUIRES_STACK void updateAtoms(JSScript *script);
    struct NameResult {
        // |tracked| is true iff the result of the name lookup is a variable that
        // is already in the tracker. The rest of the fields are set only if
        // |tracked| is false.
        bool            tracked;
        Value           v;          // current property value
        JSObject        *obj;       // Call object where name was found
        nanojit::LIns   *obj_ins;   // LIR value for obj
        js::Shape       *shape;     // shape name was resolved to
    };
    JS_REQUIRES_STACK nanojit::LIns* scopeChain();
    JS_REQUIRES_STACK nanojit::LIns* entryScopeChain() const;
    JS_REQUIRES_STACK nanojit::LIns* entryFrameIns() const;
    JS_REQUIRES_STACK JSStackFrame* frameIfInRange(JSObject* obj, unsigned* depthp = NULL) const;
    JS_REQUIRES_STACK RecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins,
                                                         JSObject *obj2, nanojit::LIns *&obj2_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus scopeChainProp(JSObject* obj, Value*& vp,
                                                              nanojit::LIns*& ins, NameResult& nr);
    JS_REQUIRES_STACK RecordingStatus callProp(JSObject* obj, JSProperty* shape, jsid id,
                                               Value*& vp, nanojit::LIns*& ins, NameResult& nr);

    JS_REQUIRES_STACK nanojit::LIns* arg(unsigned n);
    JS_REQUIRES_STACK void arg(unsigned n, nanojit::LIns* i);
    JS_REQUIRES_STACK nanojit::LIns* var(unsigned n);
    JS_REQUIRES_STACK void var(unsigned n, nanojit::LIns* i);
    JS_REQUIRES_STACK nanojit::LIns* upvar(JSScript* script, JSUpvarArray* uva, uintN index,
                                           Value& v);
    nanojit::LIns* stackLoad(tjit::Address addr, uint8 type);
    JS_REQUIRES_STACK nanojit::LIns* stack(int n);
    JS_REQUIRES_STACK void stack(int n, nanojit::LIns* i);

    JS_REQUIRES_STACK void guardNonNeg(nanojit::LIns* d0, nanojit::LIns* d1, VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
                                         nanojit::LIns* s0, nanojit::LIns* s1);

    nanojit::LIns* d2i(nanojit::LIns* f, bool resultCanBeImpreciseIfFractional = false);
    nanojit::LIns* d2u(nanojit::LIns* d);
    JS_REQUIRES_STACK RecordingStatus makeNumberInt32(nanojit::LIns* d, nanojit::LIns** num_ins);
    JS_REQUIRES_STACK RecordingStatus makeNumberUint32(nanojit::LIns* d, nanojit::LIns** num_ins);
    JS_REQUIRES_STACK nanojit::LIns* stringify(const Value& v);

    JS_REQUIRES_STACK nanojit::LIns* newArguments(nanojit::LIns* callee_ins, bool strict);

    JS_REQUIRES_STACK bool canCallImacro() const;
    JS_REQUIRES_STACK RecordingStatus callImacro(jsbytecode* imacro);
    JS_REQUIRES_STACK RecordingStatus callImacroInfallibly(jsbytecode* imacro);

    JS_REQUIRES_STACK AbortableRecordingStatus ifop();
    JS_REQUIRES_STACK RecordingStatus switchop();
    JS_REQUIRES_STACK AbortableRecordingStatus tableswitch();
    JS_REQUIRES_STACK RecordingStatus inc(Value& v, jsint incr, bool pre = true);
    JS_REQUIRES_STACK RecordingStatus inc(const Value &v, nanojit::LIns*& v_ins,
                                          Value &v_out, jsint incr,
                                          bool pre = true);
    JS_REQUIRES_STACK RecordingStatus incHelper(const Value &v, nanojit::LIns*& v_ins,
                                                nanojit::LIns*& v_ins_after,
                                                jsint incr);
    JS_REQUIRES_STACK AbortableRecordingStatus incProp(jsint incr, bool pre = true);
    JS_REQUIRES_STACK RecordingStatus incElem(jsint incr, bool pre = true);
    JS_REQUIRES_STACK AbortableRecordingStatus incName(jsint incr, bool pre = true);

    JS_REQUIRES_STACK RecordingStatus strictEquality(bool equal, bool cmpCase);
    JS_REQUIRES_STACK AbortableRecordingStatus equality(bool negate, bool tryBranchAfterCond);
    JS_REQUIRES_STACK AbortableRecordingStatus equalityHelper(Value& l, Value& r,
                                                              nanojit::LIns* l_ins, nanojit::LIns* r_ins,
                                                              bool negate, bool tryBranchAfterCond,
                                                              Value& rval);
    JS_REQUIRES_STACK AbortableRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);

    JS_REQUIRES_STACK RecordingStatus unary(nanojit::LOpcode op);
    JS_REQUIRES_STACK RecordingStatus binary(nanojit::LOpcode op);

    JS_REQUIRES_STACK RecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj,
                                                 uint32 shape, const char* name, VMSideExit* exit);

#if defined DEBUG_notme && defined XP_UNIX
    void dumpGuardedShapes(const char* prefix);
#endif

    void forgetGuardedShapes();

    JS_REQUIRES_STACK AbortableRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
                                                                   JSObject*& obj2, PCVal& pcval);
    JS_REQUIRES_STACK RecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins,
                                                            PropertyCacheEntry* entry,
                                                            PCVal& pcval);

    void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot, const Value &v,
                         nanojit::LIns* v_ins);
    void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot,
                         nanojit::LIns*& slots_ins, const Value &v, nanojit::LIns* v_ins);
    void stobj_set_slot(JSObject *obj, nanojit::LIns* obj_ins, unsigned slot,
                        nanojit::LIns*& slots_ins, const Value &v, nanojit::LIns* v_ins);

    nanojit::LIns* unbox_slot(JSObject *obj, nanojit::LIns *obj_ins, uint32 slot,
                              VMSideExit* exit);

    JS_REQUIRES_STACK AbortableRecordingStatus name(Value*& vp, nanojit::LIns*& ins, NameResult& nr);
    JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins,
                                                    uint32 *slotp, nanojit::LIns** v_insp,
                                                    Value* outp);
    JS_REQUIRES_STACK RecordingStatus propTail(JSObject* obj, nanojit::LIns* obj_ins,
                                               JSObject* obj2, PCVal pcval,
                                               uint32 *slotp, nanojit::LIns** v_insp,
                                               Value* outp);
    JS_REQUIRES_STACK RecordingStatus denseArrayElement(Value& oval, Value& idx, Value*& vp,
                                                        nanojit::LIns*& v_ins,
                                                        nanojit::LIns*& addr_ins,
                                                        VMSideExit* exit);
    JS_REQUIRES_STACK nanojit::LIns *canonicalizeNaNs(nanojit::LIns *dval_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus typedArrayElement(Value& oval, Value& idx, Value*& vp,
                                                                 nanojit::LIns*& v_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
    JS_REQUIRES_STACK AbortableRecordingStatus getProp(Value& v);
    JS_REQUIRES_STACK RecordingStatus getThis(nanojit::LIns*& this_ins);

    JS_REQUIRES_STACK void storeMagic(JSWhyMagic why, tjit::Address addr);
    JS_REQUIRES_STACK AbortableRecordingStatus unboxNextValue(nanojit::LIns* &v_ins);

    JS_REQUIRES_STACK VMSideExit* enterDeepBailCall();
    JS_REQUIRES_STACK void leaveDeepBailCall();

    JS_REQUIRES_STACK RecordingStatus primitiveToStringInPlace(Value* vp);
    JS_REQUIRES_STACK void finishGetProp(nanojit::LIns* obj_ins, nanojit::LIns* vp_ins,
                                         nanojit::LIns* ok_ins, Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyByName(nanojit::LIns* obj_ins, Value* idvalp,
                                                        Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyByIndex(nanojit::LIns* obj_ins,
                                                         nanojit::LIns* index_ins, Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyById(nanojit::LIns* obj_ins, Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyWithNativeGetter(nanojit::LIns* obj_ins,
                                                                  const js::Shape* shape,
                                                                  Value* outp);
    JS_REQUIRES_STACK RecordingStatus getPropertyWithScriptGetter(JSObject *obj,
                                                                  nanojit::LIns* obj_ins,
                                                                  const js::Shape* shape);

    JS_REQUIRES_STACK RecordingStatus getCharCodeAt(JSString *str,
                                                    nanojit::LIns* str_ins, nanojit::LIns* idx_ins,
                                                    nanojit::LIns** out_ins);
    JS_REQUIRES_STACK nanojit::LIns* getUnitString(nanojit::LIns* str_ins, nanojit::LIns* idx_ins);
    JS_REQUIRES_STACK RecordingStatus getCharAt(JSString *str,
                                                nanojit::LIns* str_ins, nanojit::LIns* idx_ins,
                                                JSOp mode, nanojit::LIns** out_ins);

    JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByName(nanojit::LIns* obj_ins,
                                                              Value* idvalp, Value* rvalp,
                                                              bool init);
    JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByIndex(nanojit::LIns* obj_ins,
                                                               nanojit::LIns* index_ins,
                                                               Value* rvalp, bool init);
    JS_REQUIRES_STACK AbortableRecordingStatus setElem(int lval_spindex, int idx_spindex,
                                                       int v_spindex);

    JS_REQUIRES_STACK RecordingStatus lookupForSetPropertyOp(JSObject* obj, nanojit::LIns* obj_ins,
                                                             jsid id, bool* safep,
                                                             const js::Shape** shapep);
    JS_REQUIRES_STACK RecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins,
                                                const js::Shape* shape,
                                                const Value& v, nanojit::LIns* v_ins);
    JS_REQUIRES_STACK RecordingStatus addDataProperty(JSObject* obj);
    JS_REQUIRES_STACK RecordingStatus setCallProp(JSObject* callobj, nanojit::LIns* callobj_ins,
                                                  const js::Shape* shape, nanojit::LIns* v_ins,
                                                  const Value& v);
    JS_REQUIRES_STACK RecordingStatus setProperty(JSObject* obj, nanojit::LIns* obj_ins,
                                                  const Value& v, nanojit::LIns* v_ins,
                                                  bool* deferredp);
    JS_REQUIRES_STACK RecordingStatus recordSetPropertyOp();
    JS_REQUIRES_STACK RecordingStatus recordInitPropertyOp(jsbytecode op);

    void box_undefined_into(tjit::Address addr);
#if JS_BITS_PER_WORD == 32
    void box_null_into(tjit::Address addr);
    nanojit::LIns* unbox_number_as_double(tjit::Address addr, nanojit::LIns* tag_ins,
                                          VMSideExit* exit);
    nanojit::LIns* unbox_object(tjit::Address addr, nanojit::LIns* tag_ins, JSValueType type,
                                VMSideExit* exit);
    nanojit::LIns* unbox_non_double_object(tjit::Address addr, nanojit::LIns* tag_ins,
                                           JSValueType type, VMSideExit* exit);
#elif JS_BITS_PER_WORD == 64
    nanojit::LIns* non_double_object_value_has_type(nanojit::LIns* v_ins, JSValueType type);
    nanojit::LIns* unpack_ptr(nanojit::LIns* v_ins);
    nanojit::LIns* unbox_number_as_double(nanojit::LIns* v_ins, VMSideExit* exit);
    nanojit::LIns* unbox_object(nanojit::LIns* v_ins, JSValueType type, VMSideExit* exit);
    nanojit::LIns* unbox_non_double_object(nanojit::LIns* v_ins, JSValueType type, VMSideExit* exit);
#endif

    nanojit::LIns* unbox_value(const Value& v, tjit::Address addr, VMSideExit* exit,
                               bool force_double = false);
    void unbox_any_object(tjit::Address addr, nanojit::LIns** obj_ins, nanojit::LIns** is_obj_ins);
    nanojit::LIns* is_boxed_true(tjit::Address addr);
    nanojit::LIns* is_boxed_magic(tjit::Address addr, JSWhyMagic why);

    nanojit::LIns* is_string_id(nanojit::LIns* id_ins);
    nanojit::LIns* unbox_string_id(nanojit::LIns* id_ins);
    nanojit::LIns* unbox_int_id(nanojit::LIns* id_ins);

    /* Box a slot on trace into the given address at the given offset. */
    void box_value_into(const Value& v, nanojit::LIns* v_ins, tjit::Address addr);

    /*
     * Box a slot so that it may be passed with value semantics to a native. On
     * 32-bit, this currently means boxing the value into insAlloc'd memory and
     * returning the address which is passed as a Value*. On 64-bit, this
     * currently means returning the boxed value which is passed as a jsval.
     */
    nanojit::LIns* box_value_for_native_call(const Value& v, nanojit::LIns* v_ins);

    /* Box a slot into insAlloc'd memory. */
    nanojit::LIns* box_value_into_alloc(const Value& v, nanojit::LIns* v_ins);
    JS_REQUIRES_STACK void guardClassHelper(bool cond, nanojit::LIns* obj_ins, Class* clasp,
                                            VMSideExit* exit, nanojit::LoadQual loadQual);
    JS_REQUIRES_STACK void guardClass(nanojit::LIns* obj_ins, Class* clasp,
                                      VMSideExit* exit, nanojit::LoadQual loadQual);
    JS_REQUIRES_STACK void guardNotClass(nanojit::LIns* obj_ins, Class* clasp,
                                         VMSideExit* exit, nanojit::LoadQual loadQual);
    JS_REQUIRES_STACK void guardDenseArray(nanojit::LIns* obj_ins, ExitType exitType);
    JS_REQUIRES_STACK void guardDenseArray(nanojit::LIns* obj_ins, VMSideExit* exit);
    JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins,
                                             JSObject** pobj, nanojit::LIns** pobj_ins,
                                             VMSideExit* exit);
    JS_REQUIRES_STACK RecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
                                                                           nanojit::LIns* obj_ins,
                                                                           VMSideExit* exit);
    JS_REQUIRES_STACK RecordingStatus guardNativeConversion(Value& v);
    JS_REQUIRES_STACK void clearReturningFrameFromNativeTracker();
    JS_REQUIRES_STACK void putActivationObjects();
    JS_REQUIRES_STACK RecordingStatus createThis(JSObject& ctor, nanojit::LIns* ctor_ins,
                                                 nanojit::LIns** thisobj_insp);
    JS_REQUIRES_STACK RecordingStatus guardCallee(Value& callee);
    JS_REQUIRES_STACK JSStackFrame *guardArguments(JSObject *obj, nanojit::LIns* obj_ins,
                                                   unsigned *depthp);
    JS_REQUIRES_STACK nanojit::LIns* guardArgsLengthNotAssigned(nanojit::LIns* argsobj_ins);
    JS_REQUIRES_STACK void guardNotHole(nanojit::LIns* argsobj_ins, nanojit::LIns* ids_ins);
    JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSObject* ctor,
                                                        nanojit::LIns*& proto_ins);
    JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSProtoKey key,
                                                        nanojit::LIns*& proto_ins);
    JS_REQUIRES_STACK RecordingStatus newArray(JSObject* ctor, uint32 argc, Value* argv,
                                               Value* rval);
    JS_REQUIRES_STACK RecordingStatus newString(JSObject* ctor, uint32 argc, Value* argv,
                                                Value* rval);
    JS_REQUIRES_STACK RecordingStatus interpretedFunctionCall(Value& fval, JSFunction* fun,
                                                              uintN argc, bool constructing);
    JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins,
                                                           nanojit::LIns *&status_ins);
    JS_REQUIRES_STACK RecordingStatus emitNativeCall(JSSpecializedNative* sn, uintN argc,
                                                     nanojit::LIns* args[], bool rooted);
    JS_REQUIRES_STACK void emitNativePropertyOp(const js::Shape* shape,
                                                nanojit::LIns* obj_ins,
                                                nanojit::LIns* addr_boxed_val_ins);
    JS_REQUIRES_STACK RecordingStatus callSpecializedNative(JSNativeTraceInfo* trcinfo, uintN argc,
                                                            bool constructing);
    JS_REQUIRES_STACK RecordingStatus callNative(uintN argc, JSOp mode);
    JS_REQUIRES_STACK RecordingStatus callFloatReturningInt(uintN argc,
                                                            const nanojit::CallInfo *ci);
    JS_REQUIRES_STACK RecordingStatus functionCall(uintN argc, JSOp mode);

    JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
    JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc);

    AbortableRecordingStatus hasMethod(JSObject* obj, jsid id, bool& found);
    JS_REQUIRES_STACK AbortableRecordingStatus hasIteratorMethod(JSObject* obj, bool& found);

    JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);

    JS_REQUIRES_STACK JSValueType determineSlotType(Value* vp);

    JS_REQUIRES_STACK RecordingStatus setUpwardTrackedVar(Value* stackVp, const Value& v,
                                                          nanojit::LIns* v_ins);

    JS_REQUIRES_STACK AbortableRecordingStatus compile();
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop(VMSideExit* exit);
    JS_REQUIRES_STACK void joinEdgesToEntry(TreeFragment* peer_root);
    JS_REQUIRES_STACK void adjustCallerTypes(TreeFragment* f);
    JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner);
    JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit);
    JS_REQUIRES_STACK void determineGlobalTypes(JSValueType* typeMap);
    JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
    JS_REQUIRES_STACK TreeFragment* findNestedCompatiblePeer(TreeFragment* f);
    JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(TreeFragment* inner,
                                                               uintN& inlineCallCount);

    static JS_REQUIRES_STACK MonitorResult recordLoopEdge(JSContext* cx, TraceRecorder* r,
                                                          uintN& inlineCallCount);

    /* Allocators associated with this recording session. */
    VMAllocator& tempAlloc() const { return *traceMonitor->tempAlloc; }
    VMAllocator& traceAlloc() const { return *traceMonitor->traceAlloc; }
    VMAllocator& dataAlloc() const { return *traceMonitor->dataAlloc; }

    /* Member declarations for each opcode, to be called before interpreting the opcode. */
#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format)               \
    JS_REQUIRES_STACK AbortableRecordingStatus record_##op();
# include "jsopcode.tbl"
#undef OPDEF
    TraceRecorder(JSContext* cx, TraceMonitor *tm, VMSideExit*, VMFragment*,
                  unsigned stackSlots, unsigned ngslots, JSValueType* typeMap,
                  VMSideExit* expectedInnerExit, JSScript* outerScript, jsbytecode* outerPC,
                  uint32 outerArgc, bool speculate);

    /* The destructor should only be called through finish*, not directly. */
    ~TraceRecorder();

    JS_REQUIRES_STACK AbortableRecordingStatus finishSuccessfully();

    enum AbortResult { NORMAL_ABORT, JIT_RESET };
    JS_REQUIRES_STACK AbortResult finishAbort(const char* reason);

    friend class ImportBoxedStackSlotVisitor;
    friend class ImportUnboxedStackSlotVisitor;
    friend class ImportGlobalSlotVisitor;
    friend class AdjustCallerGlobalTypesVisitor;
    friend class AdjustCallerStackTypesVisitor;
    friend class TypeCompatibilityVisitor;
    friend class ImportFrameSlotsVisitor;
    friend class SlotMap;
    friend class DefaultSlotMap;
    friend class DetermineTypesVisitor;
    friend class RecursiveSlotMap;
    friend class UpRecursiveSlotMap;
    friend MonitorResult RecordLoopEdge(JSContext*, TraceMonitor*, uintN&);
    friend TracePointAction RecordTracePoint(JSContext*, TraceMonitor*, uintN &inlineCallCount,
                                             bool *blacklist);
    friend AbortResult AbortRecording(JSContext*, const char*);
    friend class BoxArg;
    friend void TraceMonitor::sweep(JSContext *cx);
  public:
    static bool JS_REQUIRES_STACK
    startRecorder(JSContext*, TraceMonitor*, VMSideExit*, VMFragment*,
                  unsigned stackSlots, unsigned ngslots, JSValueType* typeMap,
                  VMSideExit* expectedInnerExit, JSScript* outerScript, jsbytecode* outerPC,
                  uint32 outerArgc, bool speculate);

    VMFragment*   getFragment() const { return fragment; }
    TreeFragment* getTree() const { return tree; }
    bool          outOfMemory() const { return traceMonitor->outOfMemory(); }
    Oracle*       getOracle() const { return oracle; }
    JSObject*     getGlobal() const { return globalObj; }

    /* Entry points / callbacks from the interpreter. */
    JS_REQUIRES_STACK AbortableRecordingStatus monitorRecording(JSOp op);
    JS_REQUIRES_STACK AbortableRecordingStatus record_EnterFrame();
    JS_REQUIRES_STACK AbortableRecordingStatus record_LeaveFrame();
    JS_REQUIRES_STACK AbortableRecordingStatus record_AddProperty(JSObject *obj);
    JS_REQUIRES_STACK AbortableRecordingStatus record_DefLocalFunSetSlot(uint32 slot,
                                                                         JSObject* obj);
    JS_REQUIRES_STACK AbortableRecordingStatus record_NativeCallComplete();
    void forgetGuardedShapesForObject(JSObject* obj);

    bool globalSetExpected(unsigned slot) {
        unsigned *pi = Find(pendingGlobalSlotsToSet, slot);
        if (pi == pendingGlobalSlotsToSet.end()) {
            /*
             * Do slot arithmetic manually to avoid getSlotRef assertions which
             * do not need to be satisfied for this purpose.
             */
            Value *vp = globalObj->getSlots() + slot;

            /* If this global is definitely being tracked, then the write is unexpected. */
            if (tracker.has(vp))
                return false;

            /*
             * Otherwise, only abort if the global is not present in the
             * import typemap. Just deep aborting false here is not acceptable,
             * because the recorder does not guard on every operation that
             * could lazily resolve. Since resolving adds properties to
             * reserved slots, the tracer will never have imported them.
             */
            return tree->globalSlots->offsetOf((uint16)nativeGlobalSlot(vp)) == -1;
        }
        pendingGlobalSlotsToSet.erase(pi);
        return true;
    }
    /* Debug printing functionality to emit printf() on trace. */
    JS_REQUIRES_STACK void tprint(const char *format, int count, nanojit::LIns *insa[]);
    JS_REQUIRES_STACK void tprint(const char *format);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4, nanojit::LIns *ins5);
    JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
                                  nanojit::LIns *ins2, nanojit::LIns *ins3,
                                  nanojit::LIns *ins4, nanojit::LIns *ins5,
                                  nanojit::LIns *ins6);
};
#define TRACING_ENABLED(cx)       ((cx)->traceJitEnabled)
#define REGEX_JIT_ENABLED(cx)     ((cx)->traceJitEnabled || (cx)->methodJitEnabled)

#define JSOP_IN_RANGE(op,lo,hi)   (uintN((op) - (lo)) <= uintN((hi) - (lo)))
#define JSOP_IS_BINARY(op)        JSOP_IN_RANGE(op, JSOP_BITOR, JSOP_MOD)
#define JSOP_IS_UNARY(op)         JSOP_IN_RANGE(op, JSOP_NEG, JSOP_POS)
#define JSOP_IS_EQUALITY(op)      JSOP_IN_RANGE(op, JSOP_EQ, JSOP_NE)

#define TRACE_ARGS_(x,args)                                                   \
    JS_BEGIN_MACRO                                                            \
        if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) {                        \
            AbortableRecordingStatus status = tr_->record_##x args;           \
            if (StatusAbortsRecorderIfActive(status)) {                       \
                if (TRACE_RECORDER(cx)) {                                     \
                    JS_ASSERT(TRACE_RECORDER(cx) == tr_);                     \
                    AbortRecording(cx, #x);                                   \
                }                                                             \
                if (status == ARECORD_ERROR)                                  \
                    goto error;                                               \
            }                                                                 \
            JS_ASSERT(status != ARECORD_IMACRO);                              \
        }                                                                     \
    JS_END_MACRO

#define TRACE_ARGS(x,args)      TRACE_ARGS_(x, args)
#define TRACE_0(x)              TRACE_ARGS(x, ())
#define TRACE_1(x,a)            TRACE_ARGS(x, (a))
#define TRACE_2(x,a,b)          TRACE_ARGS(x, (a, b))
extern JS_REQUIRES_STACK MonitorResult
MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, JSInterpMode interpMode);

extern JS_REQUIRES_STACK TracePointAction
RecordTracePoint(JSContext*, uintN& inlineCallCount, bool* blacklist);

extern JS_REQUIRES_STACK TracePointAction
MonitorTracePoint(JSContext*, uintN& inlineCallCount, bool* blacklist,
                  void** traceData, uintN *traceEpoch, uint32 *loopCounter, uint32 hits);

extern JS_REQUIRES_STACK TraceRecorder::AbortResult
AbortRecording(JSContext* cx, const char* reason);
extern void
InitJIT(TraceMonitor *tm);

extern void
FinishJIT(TraceMonitor *tm);

extern void
PurgeScriptFragments(TraceMonitor* tm, JSScript* script);

extern bool
OverfullJITCache(JSContext *cx, TraceMonitor* tm);

extern void
FlushJITCache(JSContext* cx, TraceMonitor* tm);

extern JSObject *
GetBuiltinFunction(JSContext *cx, uintN index);

extern void
SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);

extern bool
ExternNativeToValue(JSContext* cx, Value& v, JSValueType type, double* slot);
#ifdef MOZ_TRACEVIS

extern JS_FRIEND_API(bool)
StartTraceVis(const char* filename);

extern JS_FRIEND_API(JSBool)
StartTraceVisNative(JSContext *cx, uintN argc, jsval *vp);

extern JS_FRIEND_API(bool)
StopTraceVis();

extern JS_FRIEND_API(JSBool)
StopTraceVisNative(JSContext *cx, uintN argc, jsval *vp);
/* Must contain no more than 16 items. */
enum TraceVisState {
    // Special: means we returned from current activity to last
    S_EXITLAST,
    // Events: these all have (bit 3) == 1.
};

/* Reason for an exit to the interpreter. */
enum TraceVisExitReason {
    R_NONE,
    /* Reasons in MonitorLoopEdge */
    R_FAIL_EXECUTE_TREE,
    R_FAIL_EXTEND_FLUSH,
    R_FAIL_EXTEND_MAX_BRANCHES,
    R_FAIL_EXTEND_START,
    R_FAIL_SCOPE_CHAIN_CHECK
};

enum TraceVisFlushReason {
    FR_GLOBAL_SHAPE_MISMATCH
};
const unsigned long long MS64_MASK = 0xfull << 60;
const unsigned long long MR64_MASK = 0x1full << 55;
const unsigned long long MT64_MASK = ~(MS64_MASK | MR64_MASK);

extern FILE* traceVisLogFile;
extern JSHashTable *traceVisScriptTable;
extern JS_FRIEND_API(void)
StoreTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r);

static inline void
LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
    if (traceVisLogFile) {
        unsigned long long sllu = s;
        unsigned long long rllu = r;
        unsigned long long d = (sllu << 60) | (rllu << 55) | (rdtsc() & MT64_MASK);
        fwrite(&d, sizeof(d), 1, traceVisLogFile);
    }
    if (traceVisScriptTable) {
        StoreTraceVisState(cx, s, r);
    }
}
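/*
 * Decoding sketch for the packed entries written above (illustrative only):
 * the state occupies the top 4 bits, the exit reason the next 5, and the
 * remaining 55 bits hold the rdtsc() timestamp:
 *
 *   TraceVisState      s = TraceVisState((d & MS64_MASK) >> 60);
 *   TraceVisExitReason r = TraceVisExitReason((d & MR64_MASK) >> 55);
 *   unsigned long long t = d & MT64_MASK;
 */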
/*
 * Although this runs the same code as LogTraceVisState, it is a separate
 * function because the meaning of the log entry is different. Also, the entry
 * formats may diverge someday.
 */
static inline void
LogTraceVisEvent(JSContext *cx, TraceVisState s, TraceVisFlushReason r)
{
    LogTraceVisState(cx, s, (TraceVisExitReason) r);
}

static inline void
EnterTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
{
    LogTraceVisState(cx, s, r);
}

static inline void
ExitTraceVisState(JSContext *cx, TraceVisExitReason r)
{
    LogTraceVisState(cx, S_EXITLAST, r);
}
struct TraceVisStateObj {
    TraceVisExitReason r;
    JSContext *mCx;

    inline TraceVisStateObj(JSContext *cx, TraceVisState s) : r(R_NONE), mCx(cx)
    {
        EnterTraceVisState(cx, s, R_NONE);
    }

    inline ~TraceVisStateObj()
    {
        ExitTraceVisState(mCx, r);
    }
};

#endif /* MOZ_TRACEVIS */
} /* namespace js */

#else  /* !JS_TRACER */

#define TRACE_0(x)              ((void)0)
#define TRACE_1(x,a)            ((void)0)
#define TRACE_2(x,a,b)          ((void)0)

#endif /* !JS_TRACER */
namespace js {

/*
 * While recording, the slots of the global object may change payload or type.
 * This is fine as long as the recorder expects this change (and therefore has
 * generated the corresponding LIR, snapshots, etc). The recorder indicates
 * that it expects a write to a global slot by setting pendingGlobalSlotsToSet
 * in the recorder, before the write is made by the interpreter, and clearing
 * pendingGlobalSlotsToSet before recording the next op. Any global slot write
 * that has not been whitelisted in this manner is therefore unexpected and, if
 * the global slot is actually being tracked, recording must be aborted.
 */
static JS_INLINE void
AbortRecordingIfUnexpectedGlobalWrite(JSContext *cx, JSObject *obj, unsigned slot)
{
#ifdef JS_TRACER
    if (TraceRecorder *tr = TRACE_RECORDER(cx)) {
        if (obj == tr->getGlobal() && !tr->globalSetExpected(slot))
            AbortRecording(cx, "Global slot written outside tracer supervision");
    }
#endif
}

} /* namespace js */

#endif /* jstracer_h___ */