1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
2 /* ***** BEGIN LICENSE BLOCK *****
3 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 * The contents of this file are subject to the Mozilla Public License Version
6 * 1.1 (the "License"); you may not use this file except in compliance with
7 * the License. You may obtain a copy of the License at
8 * http://www.mozilla.org/MPL/
10 * Software distributed under the License is distributed on an "AS IS" basis,
11 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
12 * for the specific language governing rights and limitations under the
 * License.
15 * The Original Code is [Open Source Virtual Machine].
17 * The Initial Developer of the Original Code is
18 * Adobe System Incorporated.
19 * Portions created by the Initial Developer are Copyright (C) 2004-2007
20 * the Initial Developer. All Rights Reserved.
25 * Alternatively, the contents of this file may be used under the terms of
26 * either the GNU General Public License Version 2 or later (the "GPL"), or
27 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
28 * in which case the provisions of the GPL or the LGPL are applicable instead
29 * of those above. If you wish to allow use of your version of this file only
30 * under the terms of either the GPL or the LGPL, and not to allow others to
31 * use your version of this file under the terms of the MPL, indicate your
32 * decision by deleting the provisions above and replace them with the notice
33 * and other provisions required by the GPL or the LGPL. If you do not delete
34 * the provisions above, a recipient may use your version of this file under
35 * the terms of any one of the MPL, the GPL or the LGPL.
37 * ***** END LICENSE BLOCK ***** */
39 #ifndef __nanojit_LIR__
40 #define __nanojit_LIR__
// Forward declaration only; RegionTracker is defined elsewhere in avmplus.
42 namespace avmplus
{ class RegionTracker
; }
// True when op is one of the skip/tramp pseudo-opcodes, i.e. its value is at
// or below LIR_tramp in the LOpcode enumeration below.
46 #define is_trace_skip_tramp(op) ((op) <= LIR_tramp)
// NOTE(review): fragment of the LOpcode enumeration -- the "enum LOpcode {"
// opener and many enumerators are missing from this extract, so the gaps in
// the embedded numbering below are extraction artifacts, not intent.
49 #if defined(_MSC_VER) && _MSC_VER >= 1400
53 // flags; upper bits reserved
54 LIR64
= 0x40, // result is double or quad
56 // special operations (must be 0..N)
58 LIR_nearskip
= 3, // must be LIR_skip-1 and lsb=1
60 LIR_neartramp
= 5, // must be LIR_tramp-1 and lsb=1
63 // non-pure operations
71 LIR_loop
= 19, // loop fragment
72 LIR_x
= 20, // exit always
81 LIR_cmov
= 31, // conditional move (op1=cond, op2=cond(iftrue,iffalse))
84 LIR_ldc
= 34, // non-volatile load
85 LIR_2
= 35, // wraps a pair of refs
86 LIR_neg
= 36, // [ 1 integer input / integer output ]
87 LIR_add
= 37, // [ 2 operand integer inputs / integer output ]
98 // conditional guards, op^1 to complement
99 LIR_xt
= 48, // exit if true 0x30 0011 0000
100 LIR_xf
= 49, // exit if false 0x31 0011 0001
103 LIR_ldcb
= 52, // non-volatile 8-bit load
108 // relational operators. op^1 to swap left/right, op^3 to complement.
109 LIR_lt
= 56, // 0x38 0011 1000
110 LIR_gt
= 57, // 0x39 0011 1001
111 LIR_le
= 58, // 0x3A 0011 1010
112 LIR_ge
= 59, // 0x3B 0011 1011
113 LIR_ult
= 60, // 0x3C 0011 1100
114 LIR_ugt
= 61, // 0x3D 0011 1101
115 LIR_ule
= 62, // 0x3E 0011 1110
116 LIR_uge
= 63, // 0x3F 0011 1111
// 64-bit/quad variants: each is the 32-bit opcode with the LIR64 flag OR'd in.
121 LIR_stq
= LIR_st
| LIR64
,
122 LIR_stqi
= LIR_sti
| LIR64
,
123 LIR_quad
= LIR_int
| LIR64
,
124 LIR_ldq
= LIR_ld
| LIR64
,
125 LIR_qiand
= 24 | LIR64
,
126 LIR_qiadd
= 25 | LIR64
,
127 LIR_qilsh
= LIR_lsh
| LIR64
,
129 LIR_fcall
= LIR_call
| LIR64
,
130 LIR_fneg
= LIR_neg
| LIR64
,
131 LIR_fadd
= LIR_add
| LIR64
,
132 LIR_fsub
= LIR_sub
| LIR64
,
133 LIR_fmul
= LIR_mul
| LIR64
,
134 LIR_fdiv
= 40 | LIR64
,
135 LIR_qcmov
= LIR_cmov
| LIR64
,
137 LIR_qjoin
= 41 | LIR64
,
138 LIR_i2f
= 42 | LIR64
,
139 LIR_u2f
= 43 | LIR64
,
140 LIR_qior
= 44 | LIR64
// Pointer-sized opcode aliases: map LIR_p* to the quad (64-bit) or int
// (32-bit) forms depending on the target word size.
// NOTE(review): the "#else" separating the two groups (original line ~150)
// and the closing "#endif" appear to be missing from this extract -- confirm
// against the original header.
143 #if defined NANOJIT_64BIT
144 #define LIR_ldp LIR_ldq
145 #define LIR_piadd LIR_qiadd
146 #define LIR_piand LIR_qiand
147 #define LIR_pilsh LIR_qilsh
148 #define LIR_pcmov LIR_qcmov
149 #define LIR_pior LIR_qior
151 #define LIR_ldp LIR_ld
152 #define LIR_piadd LIR_add
153 #define LIR_piand LIR_and
154 #define LIR_pilsh LIR_lsh
155 #define LIR_pcmov LIR_cmov
156 #define LIR_pior LIR_or
// Number of instruction words needed to hold argc one-byte argument
// references. NOTE(review): body not visible in this extract -- presumably
// rounds argc up to a multiple of 4 and divides by 4; confirm.
159 inline uint32_t argwords(uint32_t argc
) {
// NOTE(review): fragment of the LIns (low-level instruction) class; the
// "class LIns {" opener, union wrapper lines, and several members are missing
// from this extract. Code left byte-identical; only comments touched.
167 // Low-level Instruction 4B
168 // had to lay it out as a union with duplicate code fields since msvc couldn't figure out how to compact it otherwise.
171 friend class LirBufWriter
;
172 // 3-operand form (backwards reach only)
176 uint32_t oprnd_3
:8; // only used for store, since this location gets clobbered during generation
177 uint32_t oprnd_1
:8; // 256 ins window and since they only point backwards this is sufficient.
184 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
185 signed int disp
:8; // Sun Studio requires explicitly declaring signed int bit-field
189 uint32_t oprnd_1
:8; // 256 ins window and since they only point backwards this is sufficient.
197 uint32_t resv
:8; // clobbered during assembly
202 // imm24 form for short tramp & skip
206 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
217 uint32_t resv
:8; // clobbered during assembly
218 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
225 // overlay used during code generation ( note that last byte is reserved for allocation )
229 uint32_t resv
:8; // clobbered during assembly
234 * Various forms of the instruction.
236 * In general the oprnd_x entries contain an uint value 0-255 that identifies a previous
237 * instruction, where 0 means the previous instruction and 255 means the instruction two
238 * hundred and fifty five prior to this one.
240 * For pointing to instructions further than this range LIR_tramp is used.
// Encode the backwards distance to another instruction as an 8-bit operand.
252 uint32_t reference(LIns
*) const;
// Resolve an 8-bit backwards operand offset to the instruction it names.
253 LIns
* deref(int32_t off
) const;
256 LIns
* FASTCALL
oprnd1() const;
257 LIns
* FASTCALL
oprnd2() const;
258 LIns
* FASTCALL
oprnd3() const;
// Accessors for the overlaid union forms (u/c/i/t/g fields).
260 inline LOpcode
opcode() const { return u
.code
; }
261 inline uint8_t imm8() const { return c
.imm8a
; }
262 inline int16_t imm16() const { return i
.imm16
; }
// ref(): for skip/tramp instructions, odd opcodes (lsb=1, the "near" forms)
// store a relative imm24; even opcodes store a full pointer in the preceding
// instruction word(s) -- two words on 64-bit targets, one on 32-bit.
263 inline LIns
* ref() const {
264 #if defined NANOJIT_64BIT
265 return (t
.code
& 1) ? (LIns
*)this+t
.imm24
: *(LIns
**)(this-2);
267 return (t
.code
& 1) ? (LIns
*)this+t
.imm24
: *(LIns
**)(this-1);
// 32-bit immediates live in the instruction word preceding this one.
270 inline int32_t imm32() const { return *(int32_t*)(this-1); }
271 inline uint8_t resv() const { return g
.resv
; }
272 void* payload() const;
// The Page containing this instruction (instructions never straddle pages).
273 inline Page
* page() { return (Page
*) alignTo(this,NJ_PAGE_SIZE
); }
275 // index args in r-l order. arg(0) is rightmost arg
276 inline LIns
* arg(uint32_t i
) {
279 uint8_t* offs
= (uint8_t*) (this-argwords(c
));
280 return deref(offs
[i
]);
// Displacement for a store: either the embedded sti disp or operand 3's
// constant value.
283 inline int32_t immdisp()const
285 return (u
.code
&~LIR64
) == LIR_sti
? sti
.disp
: oprnd3()->constval();
288 inline static bool sameop(LIns
* a
, LIns
* b
)
290 // hacky but more efficient than opcode() == opcode() due to bit masking of 7-bit field
295 tmp
.x
= *(uint32_t*)a
^ *(uint32_t*)b
;
296 return tmp
.u
.code
== 0;
// Integer constant value: short form uses imm16, otherwise imm32.
299 inline int32_t constval() const
301 NanoAssert(isconst());
302 return isop(LIR_short
) ? imm16() : imm32();
// 64-bit constant stored in the two preceding words; read directly when
// unaligned access is allowed, otherwise reassembled word-by-word.
305 inline uint64_t constvalq() const
307 NanoAssert(isconstq());
308 #ifdef AVMPLUS_UNALIGNED_ACCESS
309 return *(const uint64_t*)(this-2);
311 union { uint64_t tmp
; int32_t dst
[2]; } u
;
312 const int32_t* src
= (const int32_t*)(this-2);
// Pointer-sized constant: quad on 64-bit targets, int on 32-bit.
319 inline void* constvalp() const
322 return (void*)constvalq();
324 return (void*)constval();
328 inline double constvalf() const
330 NanoAssert(isconstq());
331 #ifdef AVMPLUS_UNALIGNED_ACCESS
332 return *(const double*)(this-2);
334 union { uint32_t dst
[2]; double tmpf
; } u
;
335 const int32_t* src
= (const int32_t*)(this-2);
// Opcode classification predicates.
342 bool isCse(const CallInfo
*functions
) const;
343 bool isop(LOpcode o
) const { return u
.code
== o
; }
348 bool isStore() const;
350 bool isGuard() const;
351 bool isconst() const;
352 bool isconstval(int32_t val
) const;
353 bool isconstq() const;
354 bool isconstp() const;
356 return isop(LIR_neartramp
) || isop(LIR_tramp
);
// Mutators used while emitting/patching instructions.
359 void setimm16(int32_t i
);
360 void setimm24(int32_t i
);
361 void setresv(uint32_t resv
);
362 void initOpcode(LOpcode
);
363 void setOprnd1(LIns
*);
364 void setOprnd2(LIns
*);
365 void setOprnd3(LIns
*);
366 void setDisp(int8_t d
);
// Call-instruction accessors (valid only when isCall()).
370 inline uint32_t argc() {
371 NanoAssert(isCall());
374 inline uint8_t fid() const {
375 NanoAssert(isCall());
// Free-function opcode predicates and call-argument helper.
381 bool FASTCALL
isCse(LOpcode v
);
382 bool FASTCALL
isCmp(LOpcode v
);
383 bool FASTCALL
isCond(LOpcode v
);
384 LIns
* FASTCALL
callArgN(LInsp i
, uint32_t n
);
// Per-opcode operand-count table, indexed by LOpcode.
385 extern const uint8_t operandCount
[];
// Forward declaration; Fragmento is defined elsewhere in nanojit.
387 class Fragmento
; // @todo remove this ; needed for minbuild for some reason?!? Should not be compiling this code at all
// NOTE(review): fragment of LirWriter, the abstract base of the writer
// pipeline. Each virtual insXXX method's default implementation delegates to
// the next writer in the chain ("out"); subclasses override to filter or
// transform instructions on the way through. Several lines are missing from
// this extract.
391 // make it a GCObject so we can explicitly delete it early
392 class LirWriter
: public GCObject
397 const CallInfo
*_functions
;
399 virtual ~LirWriter() {}
// Chain onto "out"; inherit its function table (null-safe).
400 LirWriter(LirWriter
* out
)
401 : out(out
), _functions(out
?out
->_functions
: 0) {}
403 virtual LInsp
ins0(LOpcode v
) {
406 virtual LInsp
ins1(LOpcode v
, LIns
* a
) {
407 return out
->ins1(v
, a
);
409 virtual LInsp
ins2(LOpcode v
, LIns
* a
, LIns
* b
) {
410 return out
->ins2(v
, a
, b
);
412 virtual LInsp
insGuard(LOpcode v
, LIns
*c
, SideExit
*x
) {
413 return out
->insGuard(v
, c
, x
);
415 virtual LInsp
insParam(int32_t i
) {
416 return out
->insParam(i
);
418 virtual LInsp
insImm(int32_t imm
) {
419 return out
->insImm(imm
);
421 virtual LInsp
insImmq(uint64_t imm
) {
422 return out
->insImmq(imm
);
424 virtual LInsp
insLoad(LOpcode op
, LIns
* base
, LIns
* d
) {
425 return out
->insLoad(op
, base
, d
);
// Store with a displacement: use the immediate form when d fits in a signed
// byte, otherwise materialize d as an immediate operand.
427 virtual LInsp
insStore(LIns
* value
, LIns
* base
, LIns
* disp
) {
428 return out
->insStore(value
, base
, disp
);
430 virtual LInsp
insStorei(LIns
* value
, LIns
* base
, int32_t d
) {
431 return isS8(d
) ? out
->insStorei(value
, base
, d
)
432 : out
->insStore(value
, base
, insImm(d
));
434 virtual LInsp
insCall(uint32_t fid
, LInsp args
[]) {
435 return out
->insCall(fid
, args
);
// Convenience helpers built on the primitives above.
439 LIns
* insLoadi(LIns
*base
, int disp
);
440 LIns
* insLoad(LOpcode op
, LIns
*base
, int disp
);
441 LIns
* ins_choose(LIns
* cond
, LIns
* iftrue
, LIns
* iffalse
, bool);
442 LIns
* ins_eq0(LIns
* oprnd1
);
443 LIns
* ins2i(LOpcode op
, LIns
*oprnd1
, int32_t);
444 LIns
* qjoin(LInsp lo
, LInsp hi
);
445 LIns
* insImmPtr(const void *ptr
);
// Per-opcode mnemonic strings, indexed by LOpcode (verbose output).
449 extern const char* lirNames
[];
// NOTE(review): fragment of LabelMap -- maps address ranges to human-readable
// names for verbose/debug output. Class braces and some members are missing
// from this extract.
452 * map address ranges to meaningful names.
454 class LabelMap MMGC_SUBCLASS_DECL
// One named range: its label, byte size, and alignment (packed bit-fields).
457 class Entry MMGC_SUBCLASS_DECL
460 Entry(int) : name(0), size(0), align(0) {}
461 Entry(avmplus::String
*n
, size_t s
, size_t a
) : name(n
),size(s
),align(a
) {}
463 DRCWB(avmplus::String
*) name
;
464 size_t size
:29, align
:3;
// Entries keyed by start address.
466 avmplus::SortedMap
<const void*, Entry
*, avmplus::LIST_GCObjects
> names
;
// Scratch buffer for formatting; end points past the last used byte.
468 char buf
[1000], *end
;
469 void formatAddr(const void *p
, char *buf
);
472 LabelMap(AvmCore
*, LabelMap
* parent
);
474 void add(const void *p
, size_t size
, size_t align
, const char *name
);
475 void add(const void *p
, size_t size
, size_t align
, avmplus::String
*);
476 const char *dup(const char *);
477 const char *format(const void *p
);
478 void promoteAll(const void *newbase
);
// NOTE(review): fragment of LirNameMap -- assigns and formats readable names
// for individual LIR instructions (verbose output). Class braces and some
// members are missing from this extract.
481 class LirNameMap MMGC_SUBCLASS_DECL
// Counter map used to generate unique numeric suffixes per base name.
483 class CountMap
: public avmplus::SortedMap
<int, int, avmplus::LIST_NonGCObjects
> {
485 CountMap(GC
*gc
) : avmplus::SortedMap
<int, int, avmplus::LIST_NonGCObjects
>(gc
) {};
488 if (containsKey(i
)) {
494 } lircounts
, funccounts
;
// One instruction's assigned display name.
495 class Entry MMGC_SUBCLASS_DECL
498 Entry(int) : name(0) {}
499 Entry(avmplus::String
*n
) : name(n
) {}
501 DRCWB(avmplus::String
*) name
;
503 avmplus::SortedMap
<LInsp
, Entry
*, avmplus::LIST_GCObjects
> names
;
504 const CallInfo
*_functions
;
506 void formatImm(int32_t c
, char *buf
);
509 LirNameMap(GC
*gc
, const CallInfo
*_functions
, LabelMap
*r
)
513 _functions(_functions
),
518 void addName(LInsp i
, const char *s
);
519 bool addName(LInsp i
, avmplus::String
*s
);
520 void copyName(LInsp i
, const char *s
, int suffix
);
521 const char *formatRef(LIns
*ref
);
522 const char *formatIns(LInsp i
);
523 void formatGuard(LInsp i
, char *buf
);
// NOTE(review): fragment of VerboseWriter -- a LirWriter pipeline stage that
// records each emitted instruction (via add) and prints the accumulated list
// with LirNameMap formatting. Some lines are missing from this extract.
527 class VerboseWriter
: public LirWriter
// Instructions buffered for printing.
529 avmplus::List
<LInsp
, avmplus::LIST_NonGCObjects
> code
;
532 VerboseWriter(GC
*gc
, LirWriter
*out
, LirNameMap
* names
)
533 : LirWriter(out
), code(gc
), names(names
)
// Print every buffered instruction, one per line.
543 for (int j
=0, n
=code
.size(); j
< n
; j
++)
544 printf(" %s\n",names
->formatIns(code
[j
]));
// Each override forwards to "out" and records the result for printing.
549 LIns
* insGuard(LOpcode op
, LInsp cond
, SideExit
*x
) {
550 LInsp i
= add(out
->insGuard(op
,cond
,x
));
556 LIns
* ins0(LOpcode v
) {
557 LInsp i
= add(out
->ins0(v
));
563 LIns
* ins1(LOpcode v
, LInsp a
) {
564 return add(out
->ins1(v
, a
));
// LIR_2 pseudo-instructions are forwarded without being recorded.
566 LIns
* ins2(LOpcode v
, LInsp a
, LInsp b
) {
567 return v
== LIR_2
? out
->ins2(v
,a
,b
) : add(out
->ins2(v
, a
, b
));
569 LIns
* insCall(uint32_t fid
, LInsp args
[]) {
570 return add(out
->insCall(fid
, args
));
572 LIns
* insParam(int32_t i
) {
573 return add(out
->insParam(i
));
575 LIns
* insLoad(LOpcode v
, LInsp base
, LInsp disp
) {
576 return add(out
->insLoad(v
, base
, disp
));
578 LIns
* insStore(LInsp v
, LInsp b
, LInsp d
) {
579 return add(out
->insStore(v
, b
, d
));
581 LIns
* insStorei(LInsp v
, LInsp b
, int32_t d
) {
582 return add(out
->insStorei(v
, b
, d
));
// NOTE(review): fragment of ExprFilter -- a LirWriter stage that overrides
// ins1/ins2/insGuard; presumably performs algebraic simplification and
// constant folding (bodies not visible in this extract -- confirm).
588 class ExprFilter
: public LirWriter
591 ExprFilter(LirWriter
*out
) : LirWriter(out
) {}
592 LIns
* ins1(LOpcode v
, LIns
* a
);
593 LIns
* ins2(LOpcode v
, LIns
* a
, LIns
* b
);
594 LIns
* insGuard(LOpcode v
, LIns
*c
, SideExit
*x
);
// NOTE(review): fragment of the instruction hash set (the "class" opener is
// missing from this extract; presumably LInsHashSet, used by CSE to find
// previously-emitted equivalent instructions -- confirm).
597 // @todo, this could be replaced by a generic HashMap or HashSet, if we had one
600 // must be a power of 2.
601 // don't start too small, or we'll waste time growing and rehashing.
602 // don't start too large, will waste memory.
603 static const uint32_t kInitialCap
= 2048;
// Core open-addressing machinery: hash, probe, compare, rehash.
609 static uint32_t FASTCALL
hashcode(LInsp i
);
610 uint32_t FASTCALL
find(LInsp name
, uint32_t hash
, const InsList
& list
, uint32_t cap
);
611 static bool FASTCALL
equals(LInsp a
, LInsp b
);
612 void FASTCALL
grow();
// Typed lookups; on miss, &i receives the slot index to add at.
617 LInsp
find32(int32_t a
, uint32_t &i
);
618 LInsp
find64(uint64_t a
, uint32_t &i
);
619 LInsp
find1(LOpcode v
, LInsp a
, uint32_t &i
);
620 LInsp
find2(LOpcode v
, LInsp a
, LInsp b
, uint32_t &i
);
621 LInsp
findcall(uint32_t fid
, uint32_t argc
, LInsp args
[], uint32_t &i
);
622 LInsp
add(LInsp i
, uint32_t k
);
623 void replace(LInsp i
);
// Hash functions for each instruction shape.
625 static uint32_t FASTCALL
hashimm(int32_t);
626 static uint32_t FASTCALL
hashimmq(uint64_t);
627 static uint32_t FASTCALL
hash1(LOpcode v
, LInsp
);
628 static uint32_t FASTCALL
hash2(LOpcode v
, LInsp
, LInsp
);
629 static uint32_t FASTCALL
hashcall(uint32_t fid
, uint32_t argc
, LInsp args
[]);
// NOTE(review): fragment of CseFilter -- a LirWriter stage overriding the
// value-producing emitters; presumably implements common-subexpression
// elimination via the hash set above (bodies not visible -- confirm).
632 class CseFilter
: public LirWriter
636 CseFilter(LirWriter
*out
, GC
*gc
);
637 LIns
* insImm(int32_t imm
);
638 LIns
* insImmq(uint64_t q
);
639 LIns
* ins1(LOpcode v
, LInsp
);
640 LIns
* ins2(LOpcode v
, LInsp
, LInsp
);
641 LIns
* insLoad(LOpcode v
, LInsp b
, LInsp d
);
642 LIns
* insCall(uint32_t fid
, LInsp args
[]);
643 LIns
* insGuard(LOpcode op
, LInsp cond
, SideExit
*x
);
// NOTE(review): fragment of LirBuffer -- the page-based storage that LIR
// instructions are written into. Class braces and several members are
// missing from this extract.
647 class LirBuffer
: public GCFinalizedObject
650 DWB(Fragmento
*) _frago
;
651 LirBuffer(Fragmento
* frago
, const CallInfo
* functions
);
652 virtual ~LirBuffer();
// Reserve count instruction slots; returns the writing position.
655 LInsp
commit(uint32_t count
);
657 bool outOmem() { return _noMem
!= 0; }
658 debug_only (void validate() const;)
659 verbose_only(DWB(LirNameMap
*) names
;)
660 verbose_only(int insCount();)
661 verbose_only(int byteCount();)
// Usage statistics.
666 uint32_t lir
; // # instructions
667 uint32_t pages
; // pages consumed
671 const CallInfo
* _functions
;
// Well-known instructions cached for quick access by pipeline stages.
672 LInsp state
,param1
,sp
,rp
;
677 Page
* _start
; // first page
678 LInsp _unused
; // next unused instruction slot
679 int _noMem
; // set if ran out of memory when writing to buffer
// NOTE(review): fragment of LirBufWriter -- the terminal LirWriter stage
// that actually encodes instructions into a LirBuffer (hence the null "out"
// in its constructor). Some lines are missing from this extract.
682 class LirBufWriter
: public LirWriter
684 DWB(LirBuffer
*) _buf
; // underlying buffer housing the instructions
688 LirBufWriter(LirBuffer
* buf
)
689 : LirWriter(0), _buf(buf
) {
690 _functions
= buf
->_functions
;
693 // LirWriter interface
694 LInsp
insLoad(LOpcode op
, LInsp base
, LInsp off
);
695 LInsp
insStore(LInsp o1
, LInsp o2
, LInsp o3
);
696 LInsp
insStorei(LInsp o1
, LInsp o2
, int32_t imm
);
697 LInsp
ins0(LOpcode op
);
698 LInsp
ins1(LOpcode op
, LInsp o1
);
699 LInsp
ins2(LOpcode op
, LInsp o1
, LInsp o2
);
700 LInsp
insParam(int32_t i
);
701 LInsp
insImm(int32_t imm
);
702 LInsp
insImmq(uint64_t imm
);
703 LInsp
insCall(uint32_t fid
, LInsp args
[]);
704 LInsp
insGuard(LOpcode op
, LInsp cond
, SideExit
*x
);
// Helpers for the 8-bit backwards-operand encoding: emit a tramp/skip when
// a target is out of reach, and test reachability.
708 LInsp
insFar(LOpcode op
, LInsp target
);
709 LInsp
ensureReferenceable(LInsp i
, int32_t addedDistance
);
710 bool ensureRoom(uint32_t count
);
// True when "to" is within the unsigned 8-bit backwards window of "from".
711 bool canReference(LInsp from
, LInsp to
) {
712 return isU8(from
-to
-1);
// NOTE(review): fragment of LirFilter -- the reader-side analogue of
// LirWriter: stages chain via "in" and default read()/pos() presumably
// delegate to it (bodies not visible; the class opener is missing from this
// extract).
720 LirFilter(LirFilter
*in
) : in(in
) {}
721 virtual ~LirFilter() {}
723 virtual LInsp
read() {
726 virtual LInsp
pos() {
// NOTE(review): fragment of LirReader -- the terminal LirFilter that walks a
// LirBuffer backwards from the most recently written instruction (hence the
// null "in" in its constructors).
732 class LirReader
: public LirFilter
734 LInsp _i
; // current instruction that this decoder is operating on.
// Start at the last written instruction of buf.
737 LirReader(LirBuffer
* buf
) : LirFilter(0), _i(buf
->next()-1) { }
// Start at an explicit instruction.
738 LirReader(LInsp i
) : LirFilter(0), _i(i
) { }
739 virtual ~LirReader() {}
742 LInsp
read(); // advance to the prior instruction
// Free functions: drive assembly of a fragment's LIR, and (verbose builds
// only) print tracker state and liveness information.
750 void compile(Assembler
*assm
, Fragment
*frag
);
751 verbose_only( void printTracker(const char* s
, avmplus::RegionTracker
& trk
, Assembler
* assm
); )
752 verbose_only(void live(GC
*gc
, Assembler
*assm
, Fragment
*frag
);)
// NOTE(review): fragment of StackFilter -- a LirFilter stage; presumably
// elides stack stores (relative to sp) that are dead past a guard's stack
// top (bodies and several members not visible in this extract -- confirm).
754 class StackFilter
: public LirFilter
761 int getTop(LInsp guard
);
763 StackFilter(LirFilter
*in
, GC
*gc
, Fragment
*frag
, LInsp sp
);
764 virtual ~StackFilter() {}
// NOTE(review): fragment of CseReader -- a LirFilter stage constructed with
// an LInsHashSet of known expressions and the call-info table (remaining
// members not visible in this extract).
768 class CseReader
: public LirFilter
771 const CallInfo
*functions
;
773 CseReader(LirFilter
*in
, LInsHashSet
*exprs
, const CallInfo
*);
777 #endif // __nanojit_LIR__