/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: t; tab-width: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Adobe AS3 Team
 *   Mozilla TraceMonkey Team
 *   Asko Tontti <atontti@cc.hut.fi>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "nanojit.h"
#undef MEMORY_INFO

namespace nanojit
{
    #ifdef FEATURE_NANOJIT

    using namespace avmplus;

    static uint32_t calcSaneCacheSize(uint32_t in)
    {
        if (in < uint32_t(NJ_LOG2_PAGE_SIZE)) return NJ_LOG2_PAGE_SIZE; // at least 1 page
        if (in > 30) return 30; // 1GB should be enough for anyone
        return in;
    }
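    // Worked example, assuming NJ_LOG2_PAGE_SIZE == 12 (4KB pages); the result
    // is the log2 of the cache size in bytes:
    //   calcSaneCacheSize(4)  -> 12  (clamped up: at least one page)
    //   calcSaneCacheSize(24) -> 24  (in range: a 16MB code cache)
    //   calcSaneCacheSize(40) -> 30  (clamped down to the 1GB ceiling)
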
    /**
     * This is the main control center for creating and managing fragments.
     */
    Fragmento::Fragmento(AvmCore* core, uint32_t cacheSizeLog2)
        : _frags(core->GetGC()),
          _freePages(core->GetGC(), 1024),
          _allocList(core->GetGC()),
          _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
          _pagesGrowth(1)
    {
#ifdef _DEBUG
        // XXX These belong somewhere else, but I can't find the
        // right location right now.
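        // These checks document the opcode-numbering invariant that XOR-ing a
        // comparison opcode with 3 yields its logical negation (lt <-> ge,
        // le <-> gt, and likewise for the unsigned and floating variants).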
        NanoStaticAssert((LIR_lt ^ 3) == LIR_ge);
        NanoStaticAssert((LIR_le ^ 3) == LIR_gt);
        NanoStaticAssert((LIR_ult ^ 3) == LIR_uge);
        NanoStaticAssert((LIR_ule ^ 3) == LIR_ugt);
        NanoStaticAssert((LIR_flt ^ 3) == LIR_fge);
        NanoStaticAssert((LIR_fle ^ 3) == LIR_fgt);

        /* Opcodes must be strictly increasing without holes. */
        uint32_t count = 0;
#define OPDEF(op, number, operands) \
        NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
#define OPDEF64(op, number, operands) OPDEF(op, number, operands)
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
#endif

#ifdef MEMORY_INFO
        _allocList.set_meminfo_name("Fragmento._allocList");
#endif
        NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed
        _core = core;
        GC *gc = core->GetGC();
        _assm = NJ_NEW(gc, nanojit::Assembler)(this);
        verbose_only( enterCounts = NJ_NEW(gc, BlockHist)(gc); )
        verbose_only( mergeCounts = NJ_NEW(gc, BlockHist)(gc); )
    }

    Fragmento::~Fragmento()
    {
        AllocEntry *entry;

        clearFrags();
        _frags.clear();
        _freePages.clear();
        while( _allocList.size() > 0 )
        {
            //fprintf(stderr,"dealloc %x\n", (intptr_t)_allocList.get(_allocList.size()-1));
#ifdef MEMORY_INFO
            ChangeSizeExplicit("NanoJitMem", -1, _gcHeap->Size(_allocList.last()));
#endif
            entry = _allocList.removeLast();
            _gcHeap->Free( entry->page, entry->allocSize );
            NJ_DELETE(entry);
        }
        NJ_DELETE(_assm);
#if defined(NJ_VERBOSE)
        NJ_DELETE(enterCounts);
        NJ_DELETE(mergeCounts);
#endif
    }

    void Fragmento::trackPages()
    {
        const uint32_t pageUse = _stats.pages - _freePages.size();
        if (_stats.maxPageUse < pageUse)
            _stats.maxPageUse = pageUse;
    }

    Page* Fragmento::pageAlloc()
    {
        NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
        if (!_freePages.size())
            pagesGrow(_pagesGrowth); // try to get more mem
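        // Geometric growth policy: _pagesGrowth doubles on each call (while it
        // stays below _max_pages), so successive refills of the free list
        // request progressively larger batches instead of one page at a time.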
        if ((_pagesGrowth << 1) < _max_pages)
            _pagesGrowth <<= 1;

        trackPages();
        Page* page = 0;
        if (_freePages.size())
            page = _freePages.removeLast();
        return page;
    }

    void Fragmento::pagesRelease(PageList& l)
    {
        _freePages.add(l);
        l.clear();
        NanoAssert(_freePages.size() <= _stats.pages);
    }

    void Fragmento::pageFree(Page* page)
    {
        _freePages.add(page);
        NanoAssert(_freePages.size() <= _stats.pages);
    }

    void Fragmento::pagesGrow(int32_t count)
    {
        NanoAssert(!_freePages.size());
        MMGC_MEM_TYPE("NanojitFragmentoMem");
        Page* memory = 0;
        GC *gc = _core->GetGC();
        if (_stats.pages < _max_pages)
        {
            AllocEntry *entry;

            // make sure we don't grow beyond _max_pages
            if (_stats.pages + count > _max_pages)
                count = _max_pages - _stats.pages;
            if (count < 0)
                count = 0;
            // @todo nastiness that needs a fix'n
            _gcHeap = gc->GetGCHeap();
            NanoAssert(int32_t(NJ_PAGE_SIZE)<=_gcHeap->kNativePageSize);

            // convert the requested nanojit page count to gc heap pages
            int32_t gcpages = (count*NJ_PAGE_SIZE) / _gcHeap->kNativePageSize;
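            // e.g. assuming NJ_PAGE_SIZE == 4096: with 4KB native heap pages
            // gcpages == count, while with 64KB native pages a single heap
            // allocation unit backs 16 NanoJIT pages.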
            MMGC_MEM_TYPE("NanoJitMem");
            memory = (Page*)_gcHeap->Alloc(gcpages);
#ifdef MEMORY_INFO
            ChangeSizeExplicit("NanoJitMem", 1, _gcHeap->Size(memory));
#endif
            NanoAssert((int*)memory == pageTop(memory));
            //fprintf(stderr,"head alloc of %d at %x of %d pages using nj page size of %d\n", gcpages, (intptr_t)memory, (intptr_t)_gcHeap->kNativePageSize, NJ_PAGE_SIZE);

            entry = NJ_NEW(gc, AllocEntry);
            entry->page = memory;
            entry->allocSize = gcpages;
            _allocList.add(entry);

            _stats.pages += count;
            Page* page = memory;
            while(--count >= 0)
            {
                //fprintf(stderr,"Fragmento::pageGrow adding page %x ; %d\n", (unsigned)page, _freePages.size()+1);
                _freePages.add(page++);
            }
            trackPages();
        }
    }

    // Clear the fragment. This *does not* remove the fragment from the
    // map--the caller must take care of this.
    void Fragmento::clearFragment(Fragment* f)
    {
        Fragment *peer = f->peer;
        while (peer) {
            Fragment *next = peer->peer;
            peer->releaseTreeMem(this);
            NJ_DELETE(peer);
            peer = next;
        }
        f->releaseTreeMem(this);
        NJ_DELETE(f);
    }

    void Fragmento::clearFrag(const void* ip)
    {
        if (_frags.containsKey(ip)) {
            clearFragment(_frags.remove(ip));
        }
    }

    void Fragmento::clearFrags()
    {
        // reclaim any dangling native pages
        _assm->pageReset();

        while (!_frags.isEmpty()) {
            clearFragment(_frags.removeLast());
        }

        verbose_only( enterCounts->clear();)
        verbose_only( mergeCounts->clear();)
        verbose_only( _stats.flushes++ );
        verbose_only( _stats.compiles = 0 );
        //fprintf(stderr, "Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
    }

    Assembler* Fragmento::assm()
    {
        return _assm;
    }

    AvmCore* Fragmento::core()
    {
        return _core;
    }

    Fragment* Fragmento::getAnchor(const void* ip)
    {
        Fragment *f = newFrag(ip);
        Fragment *p = _frags.get(ip);
        if (p) {
            f->first = p;
            /* append at the end of the peer list */
            Fragment* next;
            while ((next = p->peer) != NULL)
                p = next;
            p->peer = f;
        } else {
            f->first = f;
            _frags.put(ip, f); /* this is the first fragment */
        }
        f->anchor = f;
        f->root = f;
        f->kind = LoopTrace;
        verbose_only( addLabel(f, "T", _frags.size()); )
        return f;
    }

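    // Anchors that share an ip are chained through their 'peer' fields, and
    // 'first' always points at the head of that chain; getLoop() below
    // returns the head, from which callers can walk ->peer to the alternates.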
    Fragment* Fragmento::getLoop(const void* ip)
    {
        return _frags.get(ip);
    }

    #ifdef NJ_VERBOSE
    void Fragmento::addLabel(Fragment *f, const char *prefix, int id)
    {
        char fragname[20];
        sprintf(fragname,"%s%d", prefix, id);
        labels->add(f, sizeof(Fragment), 0, fragname);
    }
    #endif

    Fragment *Fragmento::getMerge(GuardRecord *lr, const void* ip)
    {
        Fragment *anchor = lr->exit->from->anchor;
        for (Fragment *f = anchor->branches; f != 0; f = f->nextbranch) {
            if (f->kind == MergeTrace && f->ip == ip /*&& f->calldepth == lr->calldepth*/) {
                // found existing shared branch on anchor
                return f;
            }
        }

        Fragment *f = newBranch(anchor, ip);
        f->root = f;
        f->kind = MergeTrace;
        verbose_only(
            int mergeid = 1;
            for (Fragment *g = anchor->branches; g != 0; g = g->nextbranch)
                if (g->kind == MergeTrace)
                    mergeid++;
            addLabel(f, "M", mergeid);
        )
        return f;
    }

    Fragment *Fragmento::createBranch(SideExit* exit, const void* ip)
    {
        Fragment *f = newBranch(exit->from, ip);
        f->kind = BranchTrace;
        f->treeBranches = f->root->treeBranches;
        f->root->treeBranches = f;
        return f;
    }
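    // The two treeBranches assignments above are a constant-time list
    // prepend: the new branch becomes the head of its root's chain.
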
    #ifdef NJ_VERBOSE
    struct fragstats {
        int size;
        uint64_t traceDur;
        uint64_t interpDur;
        int lir, lirbytes;
    };

    void Fragmento::dumpFragStats(Fragment *f, int level, fragstats &stat)
    {
        char buf[50];
        sprintf(buf, "%*c%s", 1+level, ' ', labels->format(f));

        int called = f->hits();
        if (called >= 0)
            called += f->_called;
        else
            called = -(1<<f->blacklistLevel) - called - 1;
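        // hits() goes negative once a fragment has been blacklisted (see
        // Fragment::blacklist() below); the arithmetic above turns that raw
        // sentinel back into a displayable (negative) count.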

        uint32_t main = f->_native - f->_exitNative;

        char cause[200];
        if (f->_token && strcmp(f->_token,"loop")==0)
            sprintf(cause,"%s %d", f->_token, f->xjumpCount);
        else if (f->_token) {
            if (f->eot_target) {
                sprintf(cause,"%s %s", f->_token, labels->format(f->eot_target));
            } else {
                strcpy(cause, f->_token);
            }
        }
        else
            cause[0] = 0;

        _assm->outputf("%-10s %7d %6d %6d %6d %4d %9llu %9llu %-12s %s", buf,
            called, f->guardCount, main, f->_native, f->compileNbr, f->traceTicks/1000, f->interpTicks/1000,
            cause, labels->format(f->ip));

        stat.size += main;
        stat.traceDur += f->traceTicks;
        stat.interpDur += f->interpTicks;
        stat.lir += f->_lir;
        stat.lirbytes += f->_lirbytes;

        for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
            if (x->kind != MergeTrace)
                dumpFragStats(x,level+1,stat);
        for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
            if (x->kind == MergeTrace)
                dumpFragStats(x,level+1,stat);

        if (f->isAnchor() && f->branches != 0) {
            _assm->output("");
        }
    }

    class DurData { public:
        DurData(): frag(0), traceDur(0), interpDur(0), size(0) {}
        DurData(int): frag(0), traceDur(0), interpDur(0), size(0) {}
        DurData(Fragment* f, uint64_t td, uint64_t id, int32_t s)
            : frag(f), traceDur(td), interpDur(id), size(s) {}
        Fragment* frag;
        uint64_t traceDur;
        uint64_t interpDur;
        int32_t size;
    };

    void Fragmento::dumpRatio(const char *label, BlockHist *hist)
    {
        int total=0, unique=0;
        for (int i = 0, n=hist->size(); i < n; i++) {
            const void * id = hist->keyAt(i);
            int c = hist->get(id);
            if (c > 1) {
                //_assm->outputf("%d %X", c, id);
                unique += 1;
            }
            else if (c == 1) {
                unique += 1;
            }
            total += c;
        }
        // note: '%%' (not a bare trailing '%') is needed to print a literal percent sign
        _assm->outputf("%s total %d unique %d ratio %.1f%%", label, total, unique, double(total)/unique);
    }

    void Fragmento::dumpStats()
    {
        bool vsave = _assm->_verbose;
        _assm->_verbose = true;

        _assm->output("");
        dumpRatio("inline", enterCounts);
        dumpRatio("merges", mergeCounts);
        _assm->outputf("abc %d il %d (%.1fx) abc+il %d (%.1fx)",
            _stats.abcsize, _stats.ilsize, (double)_stats.ilsize/_stats.abcsize,
            _stats.abcsize + _stats.ilsize,
            double(_stats.abcsize+_stats.ilsize)/_stats.abcsize);

        int32_t count = _frags.size();
        int32_t pages = _stats.pages;
        int32_t maxPageUse = _stats.maxPageUse;
        int32_t free = _freePages.size();
        int32_t flushes = _stats.flushes;
        if (!count)
        {
            _assm->outputf("No fragments in cache, %d flushes", flushes);
            _assm->_verbose = vsave;
            return;
        }

        _assm->outputf("\nFragment statistics");
        _assm->outputf(" loop trees: %d", count);
        _assm->outputf(" flushes: %d", flushes);
        _assm->outputf(" compiles: %d / %d", _stats.compiles, _stats.totalCompiles);
        _assm->outputf(" used: %dk / %dk", (pages-free)<<(NJ_LOG2_PAGE_SIZE-10), pages<<(NJ_LOG2_PAGE_SIZE-10));
        _assm->outputf(" maxPageUse: %dk", (maxPageUse)<<(NJ_LOG2_PAGE_SIZE-10));
        _assm->output("\ntrace calls guards main native gen T-trace T-interp");

        avmplus::SortedMap<uint64_t, DurData, avmplus::LIST_NonGCObjects> durs(_core->gc);
        uint64_t totaldur=0;
        fragstats totalstat = { 0,0,0,0,0 };
        for (int32_t i=0; i<count; i++)
        {
            Fragment *f = _frags.at(i);
            while (true) {
                fragstats stat = { 0,0,0,0,0 };
                dumpFragStats(f, 0, stat);
                if (stat.lir) {
                    totalstat.lir += stat.lir;
                    totalstat.lirbytes += stat.lirbytes;
                }
                uint64_t bothDur = stat.traceDur + stat.interpDur;
                if (bothDur) {
                    totalstat.interpDur += stat.interpDur;
                    totalstat.traceDur += stat.traceDur;
                    totalstat.size += stat.size;
                    totaldur += bothDur;
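                    // durs is keyed by total duration and keys must be
                    // unique, so nudge the key forward a tick until it is.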
                    while (durs.containsKey(bothDur)) bothDur++;
                    DurData d(f, stat.traceDur, stat.interpDur, stat.size);
                    durs.put(bothDur, d);
                }
                if (!f->peer)
                    break;
                f = f->peer;
            }
        }
        uint64_t totaltrace = totalstat.traceDur;
        int totalsize = totalstat.size;

        _assm->outputf("");
        _assm->outputf("lirbytes %d / lir %d = %.1f bytes/lir", totalstat.lirbytes,
            totalstat.lir, double(totalstat.lirbytes)/totalstat.lir);
        _assm->outputf(" trace interp");
        _assm->outputf("%9lld (%2d%%) %9lld (%2d%%)",
            totaltrace/1000, int(100.0*totaltrace/totaldur),
            (totaldur-totaltrace)/1000, int(100.0*(totaldur-totaltrace)/totaldur));
        _assm->outputf("");
        _assm->outputf("trace ticks trace interp size");
        for (int32_t i=durs.size()-1; i >= 0; i--) {
            uint64_t bothDur = durs.keyAt(i);
            DurData d = durs.get(bothDur);
            int size = d.size;
            _assm->outputf("%-4s %9lld (%2d%%) %9lld (%2d%%) %9lld (%2d%%) %6d (%2d%%) %s",
                labels->format(d.frag),
                bothDur/1000, int(100.0*bothDur/totaldur),
                d.traceDur/1000, int(100.0*d.traceDur/totaldur),
                d.interpDur/1000, int(100.0*d.interpDur/totaldur),
                size, int(100.0*size/totalsize),
                labels->format(d.frag->ip));
        }

        _assm->_verbose = vsave;
    }

    void Fragmento::countBlock(BlockHist *hist, const void* ip)
    {
        int c = hist->count(ip);
        if (_assm->_verbose)
            _assm->outputf("++ %s %d", labels->format(ip), c);
    }

    void Fragmento::countIL(uint32_t il, uint32_t abc)
    {
        _stats.ilsize += il;
        _stats.abcsize += abc;
    }

    #ifdef AVMPLUS_VERBOSE
    void Fragmento::drawTrees(char *fileName) {
        drawTraceTrees(this, this->_frags, this->_core, fileName);
    }
    #endif
    #endif // NJ_VERBOSE

    //
    // Fragment
    //
    Fragment::Fragment(const void* _ip) : ip(_ip)
    {
        // Fragment is a gc object which is zero'd by the GC, no need to clear fields
    }

    Fragment::~Fragment()
    {
        onDestroy();
        NanoAssert(_pages == 0);
    }

    void Fragment::resetHits()
    {
        blacklistLevel >>= 1;
        _hits = 0;
    }

    void Fragment::blacklist()
    {
        blacklistLevel++;
        _hits = -(1<<blacklistLevel);
    }
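    // Exponential backoff: each blacklisting doubles the hit deficit
    // (2^blacklistLevel) that must be repaid before hits() turns non-negative
    // again, while resetHits() above halves the level, slowly forgiving
    // fragments that behave.
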
    Fragment *Fragmento::newFrag(const void* ip)
    {
        GC *gc = _core->gc;
        Fragment *f = NJ_NEW(gc, Fragment)(ip);
        f->blacklistLevel = 5;
        f->recordAttempts = 0;
        return f;
    }

    Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
    {
        Fragment *f = newFrag(ip);
        f->anchor = from->anchor;
        f->root = from->root;
        f->xjumpCount = from->xjumpCount;
        /*// prepend
        f->nextbranch = from->branches;
        from->branches = f;*/
        // append
        if (!from->branches) {
            from->branches = f;
        } else {
            Fragment *p = from->branches;
            while (p->nextbranch != 0)
                p = p->nextbranch;
            p->nextbranch = f;
        }
        return f;
    }
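    // Appending (rather than the commented-out prepend) keeps the nextbranch
    // chain in creation order, at the cost of an O(n) walk per insertion.
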
    void Fragment::releaseLirBuffer()
    {
        lastIns = 0;
    }

    void Fragment::releaseCode(Fragmento* frago)
    {
        _code = 0;
        while(_pages)
        {
            Page* next = _pages->next;
            frago->pageFree(_pages);
            _pages = next;
        }
    }

    void Fragment::releaseTreeMem(Fragmento* frago)
    {
        releaseLirBuffer();
        releaseCode(frago);

        // now do it for all branches
        Fragment* branch = branches;
        while(branch)
        {
            Fragment* next = branch->nextbranch;
            branch->releaseTreeMem(frago); // @todo safer here to recurse in case we support nested trees
            NJ_DELETE(branch);
            branch = next;
        }
    }

    #endif /* FEATURE_NANOJIT */
}