1 /*
   2  * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_fprofiler.cpp.incl"
  27 
// Static fields of FlatProfiler
// Tick classification counters: each timer tick received by the profiler
// task is accounted to exactly one of the categories below.
int               FlatProfiler::received_gc_ticks   = 0;  // ticks during active GC
int               FlatProfiler::vm_operation_ticks  = 0;  // ticks during a VM operation (non-GC, non-deopt)
int               FlatProfiler::threads_lock_ticks  = 0;  // ticks dropped because Threads_lock was contended
int               FlatProfiler::class_loader_ticks  = 0;
int               FlatProfiler::extra_ticks         = 0;
int               FlatProfiler::blocked_ticks       = 0;
int               FlatProfiler::deopt_ticks         = 0;  // ticks while a DeoptimizationMarker is active
int               FlatProfiler::unknown_ticks       = 0;  // ticks that could not be attributed to any frame
int               FlatProfiler::interpreter_ticks   = 0;  // ticks in invalid/unwalkable interpreter frames
int               FlatProfiler::compiler_ticks      = 0;  // ticks in active compiler threads
int               FlatProfiler::received_ticks      = 0;  // total ticks seen by FlatProfilerTask::task()
int               FlatProfiler::delivered_ticks     = 0;  // ticks delivered to an engaged ThreadProfiler
int*              FlatProfiler::bytecode_ticks      = NULL;  // per-bytecode histogram (code position)
int*              FlatProfiler::bytecode_ticks_stub = NULL;  // per-bytecode histogram (native position)
int               FlatProfiler::all_int_ticks       = 0;  // all ticks landing in interpreted frames
int               FlatProfiler::all_comp_ticks      = 0;  // all ticks landing in compiled frames
int               FlatProfiler::all_ticks           = 0;
bool              FlatProfiler::full_profile_flag   = false;
ThreadProfiler*   FlatProfiler::thread_profiler     = NULL;
ThreadProfiler*   FlatProfiler::vm_thread_profiler  = NULL;  // used when ProfileVM is on
FlatProfilerTask* FlatProfiler::task                = NULL;  // the periodic sampling task
elapsedTimer      FlatProfiler::timer;
int               FlatProfiler::interval_ticks_previous = 0;  // received_ticks at last interval rollover
IntervalData*     FlatProfiler::interval_data       = NULL;
  53 
// Set up the per-thread profiling state: a bump-allocation arena that
// ProfilerNode::operator new carves nodes out of, and a hash table of
// ProfilerNode chain heads.  The profiler starts disengaged; sampling
// only records into it once 'engaged' is set elsewhere.
ThreadProfiler::ThreadProfiler() {
  // Space for the ProfilerNodes
  const int area_size = 1 * ProfilerNodeSize * 1024;
  area_bottom = AllocateHeap(area_size, "fprofiler");
  area_top    = area_bottom;   // next free byte in the arena
  area_limit  = area_bottom + area_size;

  // ProfilerNode pointer table
  table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size);
  initialize();
  engaged = false;
}
  66 
  67 ThreadProfiler::~ThreadProfiler() {
  68   FreeHeap(area_bottom);
  69   area_bottom = NULL;
  70   area_top = NULL;
  71   area_limit = NULL;
  72   FreeHeap(table);
  73   table = NULL;
  74 }
  75 
// Statics for ThreadProfiler
// Number of hash buckets in each per-thread ProfilerNode table.
int ThreadProfiler::table_size = 1024;
  78 
  79 int ThreadProfiler::entry(int  value) {
  80   value = (value > 0) ? value : -value;
  81   return value % table_size;
  82 }
  83 
// Scoped marker: raises the region flag 'r' on the current Java thread's
// profiler (if any) for the lifetime of this object; the destructor
// lowers it again.  Non-Java threads and threads without a profiler are
// silently ignored (_pp stays NULL).
ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
  _r = r;
  _pp = NULL;
  assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
  Thread* tp = Thread::current();
  if (tp != NULL && tp->is_Java_thread()) {
    JavaThread* jtp = (JavaThread*) tp;
    ThreadProfiler* pp = jtp->get_thread_profiler();
    _pp = pp;  // remember the profiler so the destructor can clear the flag
    if (pp != NULL) {
      pp->region_flag[r] = true;
    }
  }
}
  98 
  99 ThreadProfilerMark::~ThreadProfilerMark() {
 100   if (_pp != NULL) {
 101     _pp->region_flag[_r] = false;
 102   }
 103   _pp = NULL;
 104 }
 105 
// Random other statics
// Fixed output column positions, used with outputStream::fill_to() to
// align the tick / native-tick / method-name columns of the reports.
static const int col1 = 2;      // position of output column 1
static const int col2 = 11;     // position of output column 2
static const int col3 = 25;     // position of output column 3
static const int col4 = 55;     // position of output column 4
 111 
 112 
// Used for detailed profiling of nmethods.
// Keeps one hit counter per 16-byte bucket of the code cache, relative
// to 'base' (the code cache's first address, captured in init()).
class PCRecorder : AllStatic {
 private:
  static int*    counters;
  static address base;
  enum {
   bucket_size = 16   // bytes of code covered by one counter
  };
  // pc -> counter index and back; both assume base <= pc.
  static int     index_for(address pc) { return (pc - base)/bucket_size;   }
  static address pc_for(int index)     { return base + (index * bucket_size); }
  // Number of counters to allocate.
  // NOTE(review): the extra "* BytesPerWord" factor over-provisions the
  // array by a word-size factor relative to max_capacity/bucket_size;
  // presumably intentional headroom, but worth confirming.
  static int     size() {
    return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
  }
 public:
  // Start address of the bucket containing pc, or NULL before init().
  static address bucket_start_for(address pc) {
    if (counters == NULL) return NULL;
    return pc_for(index_for(pc));
  }
  // Raw hit count for the bucket containing pc (no NULL guard: callers
  // are expected to check bucket_start_for first).
  static int bucket_count_for(address pc)  { return counters[index_for(pc)]; }
  static void init();
  static void record(address pc);
  static void print();
  static void print_blobs(CodeBlob* cb);
};
 137 
// PCRecorder statics: the bucket counters and the code-cache base they
// are relative to.  Both stay NULL until init() runs.
int*    PCRecorder::counters = NULL;
address PCRecorder::base     = NULL;
 140 
 141 void PCRecorder::init() {
 142   MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 143   int s = size();
 144   counters = NEW_C_HEAP_ARRAY(int, s);
 145   for (int index = 0; index < s; index++) {
 146     counters[index] = 0;
 147   }
 148   base = CodeCache::first_address();
 149 }
 150 
// Count one tick against the bucket containing pc.  A no-op until
// init() has allocated the counters.
void PCRecorder::record(address pc) {
  if (counters == NULL) return;
  assert(CodeCache::contains(pc), "must be in CodeCache");
  counters[index_for(pc)]++;
}
 156 
 157 
 158 address FlatProfiler::bucket_start_for(address pc) {
 159   return PCRecorder::bucket_start_for(pc);
 160 }
 161 
 162 int FlatProfiler::bucket_count_for(address pc) {
 163   return PCRecorder::bucket_count_for(pc);
 164 }
 165 
// Print every code blob that owns at least one PC bucket with more than
// ProfilerPCTickThreshold ticks.  Candidates are deduplicated first so
// a blob spanning several hot buckets prints once.
void PCRecorder::print() {
  if (counters == NULL) return;

  tty->cr();
  tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold);
  tty->print_cr("===================================================================");
  tty->cr();

  GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);


  // Only size() is read under the CodeCache_lock; the counter scan and
  // blob lookup below run unlocked (find_blob_unsafe tolerates that).
  int s;
  {
    MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    s = size();
  }

  for (int index = 0; index < s; index++) {
    int count = counters[index];
    if (count > ProfilerPCTickThreshold) {
      address pc = pc_for(index);
      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
      if (cb != NULL && candidates->find(cb) < 0) {
        candidates->push(cb);
      }
    }
  }
  for (int i = 0; i < candidates->length(); i++) {
    print_blobs(candidates->at(i));
  }
}
 197 
 198 void PCRecorder::print_blobs(CodeBlob* cb) {
 199   if (cb != NULL) {
 200     cb->print();
 201     if (cb->is_nmethod()) {
 202       ((nmethod*)cb)->print_code();
 203     }
 204     tty->cr();
 205   } else {
 206     tty->print_cr("stub code");
 207   }
 208 }
 209 
 210 class tick_counter {            // holds tick info for one node
 211  public:
 212   int ticks_in_code;
 213   int ticks_in_native;
 214 
 215   tick_counter()                     {  ticks_in_code = ticks_in_native = 0; }
 216   tick_counter(int code, int native) {  ticks_in_code = code; ticks_in_native = native; }
 217 
 218   int total() const {
 219     return (ticks_in_code + ticks_in_native);
 220   }
 221 
 222   void add(tick_counter* a) {
 223     ticks_in_code += a->ticks_in_code;
 224     ticks_in_native += a->ticks_in_native;
 225   }
 226 
 227   void update(TickPosition where) {
 228     switch(where) {
 229       case tp_code:     ticks_in_code++;       break;
 230       case tp_native:   ticks_in_native++;      break;
 231     }
 232   }
 233 
 234   void print_code(outputStream* st, int total_ticks) {
 235     st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
 236   }
 237 
 238   void print_native(outputStream* st) {
 239     st->print(" + %5d ", ticks_in_native);
 240   }
 241 };
 242 
// Base class for one entry in a ThreadProfiler's hash table.  Nodes are
// bump-allocated from the owning ThreadProfiler's arena (see operator
// new below) and chained per bucket through _next.  Each subclass
// represents one kind of code a tick can land in (interpreted method,
// compiled method, stub, adapter, runtime stub, unknown blob, VM code)
// and overrides the matching *_match predicate.
class ProfilerNode {
 private:
  ProfilerNode* _next;
 public:
  tick_counter ticks;

 public:

  // Placement allocation from the ThreadProfiler arena; delete is a no-op.
  void* operator new(size_t size, ThreadProfiler* tp);
  void  operator delete(void* p);

  ProfilerNode() {
    _next = NULL;
  }

  // Recursively deletes the rest of the chain (delete itself is a no-op,
  // but this runs the destructors down the chain).
  virtual ~ProfilerNode() {
    if (_next)
      delete _next;
  }

  void set_next(ProfilerNode* n) { _next = n; }
  ProfilerNode* next()           { return _next; }

  void update(TickPosition where) { ticks.update(where);}
  int total_ticks() { return ticks.total(); }

  // Kind predicates; each subclass overrides exactly the ones it is.
  virtual bool is_interpreted() const { return false; }
  virtual bool is_compiled()    const { return false; }
  virtual bool is_stub()        const { return false; }
  virtual bool is_runtime_stub() const{ return false; }
  // GC support: visit any oops (methodOops) the node holds.
  virtual void oops_do(OopClosure* f) = 0;

  // Match predicates used by the *_update routines to find an existing
  // node for the same code before appending a new one.
  virtual bool interpreted_match(methodOop m) const { return false; }
  virtual bool compiled_match(methodOop m ) const { return false; }
  virtual bool stub_match(methodOop m, const char* name) const { return false; }
  virtual bool adapter_match() const { return false; }
  virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
  virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }

  // Shared tail of the per-kind section headers.
  static void print_title(outputStream* st) {
    st->print(" + native");
    st->fill_to(col3);
    st->print("Method");
    st->fill_to(col4);
    st->cr();
  }

  // One summary line: ticks, native ticks, then 'msg' in the name column.
  static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
    t->print_code(st, total);
    st->fill_to(col2);
    t->print_native(st);
    st->fill_to(col3);
    st->print(msg);
    st->cr();
  }

  // The Java method this node stands for, or NULL for non-method nodes.
  virtual methodOop method()         = 0;

  // Print "pkg.Class.method" (slashes translated to dots), plus the
  // signature when Verbose is set to disambiguate overloads.
  virtual void print_method_on(outputStream* st) {
    int limit;
    int i;
    methodOop m = method();
    symbolOop k = m->klass_name();
    // Print the class name with dots instead of slashes
    limit = k->utf8_length();
    for (i = 0 ; i < limit ; i += 1) {
      char c = (char) k->byte_at(i);
      if (c == '/') {
        c = '.';
      }
      st->print("%c", c);
    }
    if (limit > 0) {
      st->print(".");
    }
    symbolOop n = m->name();
    limit = n->utf8_length();
    for (i = 0 ; i < limit ; i += 1) {
      char c = (char) n->byte_at(i);
      st->print("%c", c);
    }
    if( Verbose ) {
      // Disambiguate overloaded methods
      symbolOop sig = m->signature();
      sig->print_symbol_on(st);
    }
  }

  // One report line for this node: tick columns, then the method name.
  virtual void print(outputStream* st, int total_ticks) {
    ticks.print_code(st, total_ticks);
    st->fill_to(col2);
    ticks.print_native(st);
    st->fill_to(col3);
    print_method_on(st);
    st->cr();
  }

  // for hashing into the table
  static int hash(methodOop method) {
      // The point here is to try to make something fairly unique
      // out of the fields we can read without grabbing any locks
      // since the method may be locked when we need the hash.
      return (
          method->code_size() ^
          method->max_stack() ^
          method->max_locals() ^
          method->size_of_parameters());
  }

  // for sorting
  // Descending order by total ticks (hottest node first).
  static int compare(ProfilerNode** a, ProfilerNode** b) {
    return (*b)->total_ticks() - (*a)->total_ticks();
  }
};
 357 
// Bump-pointer allocation out of the owning ThreadProfiler's arena.
// Note the overflow check happens after the bump but before the node is
// constructed, so an overflow aborts the VM rather than corrupting
// memory past area_limit.
void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
  void* result = (void*) tp->area_top;
  tp->area_top += size;

  if (tp->area_top > tp->area_limit) {
    fatal("flat profiler buffer overflow");
  }
  return result;
}
 367 
// Nodes live in the ThreadProfiler arena and are reclaimed wholesale
// with it, so individual delete is intentionally a no-op.
void ProfilerNode::operator delete(void* p){
}
 370 
 371 class interpretedNode : public ProfilerNode {
 372  private:
 373    methodOop _method;
 374  public:
 375    interpretedNode(methodOop method, TickPosition where) : ProfilerNode() {
 376      _method = method;
 377      update(where);
 378    }
 379 
 380    bool is_interpreted() const { return true; }
 381 
 382    bool interpreted_match(methodOop m) const {
 383       return _method == m;
 384    }
 385 
 386    void oops_do(OopClosure* f) {
 387      f->do_oop((oop*)&_method);
 388    }
 389 
 390    methodOop method() { return _method; }
 391 
 392    static void print_title(outputStream* st) {
 393      st->fill_to(col1);
 394      st->print("%11s", "Interpreted");
 395      ProfilerNode::print_title(st);
 396    }
 397 
 398    void print(outputStream* st, int total_ticks) {
 399      ProfilerNode::print(st, total_ticks);
 400    }
 401 
 402    void print_method_on(outputStream* st) {
 403      ProfilerNode::print_method_on(st);
 404      if (Verbose) method()->invocation_counter()->print_short();
 405    }
 406 };
 407 
 408 class compiledNode : public ProfilerNode {
 409  private:
 410    methodOop _method;
 411  public:
 412    compiledNode(methodOop method, TickPosition where) : ProfilerNode() {
 413      _method = method;
 414      update(where);
 415   }
 416   bool is_compiled()    const { return true; }
 417 
 418   bool compiled_match(methodOop m) const {
 419     return _method == m;
 420   }
 421 
 422   methodOop method()         { return _method; }
 423 
 424   void oops_do(OopClosure* f) {
 425     f->do_oop((oop*)&_method);
 426   }
 427 
 428   static void print_title(outputStream* st) {
 429     st->fill_to(col1);
 430     st->print("%11s", "Compiled");
 431     ProfilerNode::print_title(st);
 432   }
 433 
 434   void print(outputStream* st, int total_ticks) {
 435     ProfilerNode::print(st, total_ticks);
 436   }
 437 
 438   void print_method_on(outputStream* st) {
 439     ProfilerNode::print_method_on(st);
 440   }
 441 };
 442 
// Node for ticks in the native stub of a native method.  Identity is
// the (method, symbol) pair; _symbol points to a unique string so
// pointer equality suffices for matching.
class stubNode : public ProfilerNode {
 private:
  methodOop _method;
  const char* _symbol;   // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
 public:
   stubNode(methodOop method, const char* name, TickPosition where) : ProfilerNode() {
     _method = method;
     _symbol = name;
     update(where);
   }

   bool is_stub() const { return true; }

   // Pointer comparison on _symbol is deliberate (unique strings).
   bool stub_match(methodOop m, const char* name) const {
     return (_method == m) && (_symbol == name);
   }

   // GC support: the held methodOop must be visited.
   void oops_do(OopClosure* f) {
     f->do_oop((oop*)&_method);
   }

   methodOop method() { return _method; }

   // Section header for the stub part of the report.
   static void print_title(outputStream* st) {
     st->fill_to(col1);
     st->print("%11s", "Stub");
     ProfilerNode::print_title(st);
   }

   void print(outputStream* st, int total_ticks) {
     ProfilerNode::print(st, total_ticks);
   }

   void print_method_on(outputStream* st) {
     ProfilerNode::print_method_on(st);
     print_symbol_on(st);
   }

  // Append the VM symbol name, if one was captured.
  void print_symbol_on(outputStream* st) {
    if(_symbol) {
      st->print("  (%s)", _symbol);
    }
  }
};
 487 
 488 class adapterNode : public ProfilerNode {
 489  public:
 490    adapterNode(TickPosition where) : ProfilerNode() {
 491      update(where);
 492   }
 493   bool is_compiled()    const { return true; }
 494 
 495   bool adapter_match() const { return true; }
 496 
 497   methodOop method()         { return NULL; }
 498 
 499   void oops_do(OopClosure* f) {
 500     ;
 501   }
 502 
 503   void print(outputStream* st, int total_ticks) {
 504     ProfilerNode::print(st, total_ticks);
 505   }
 506 
 507   void print_method_on(outputStream* st) {
 508     st->print("%s", "adapters");
 509   }
 510 };
 511 
// Node for ticks in a runtime stub.  Identity is the stub's entry point
// plus the nearest VM symbol (a unique string, compared by pointer).
class runtimeStubNode : public ProfilerNode {
 private:
   const CodeBlob* _stub;
  const char* _symbol;     // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
 public:
   runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub),  _symbol(name) {
     assert(stub->is_runtime_stub(), "wrong code blob");
     update(where);
   }

  bool is_runtime_stub() const { return true; }

  // Compare by entry point rather than blob pointer; _stub is known to
  // be a RuntimeStub (asserted in the constructor), and the incoming
  // blob is asserted to be one too.
  bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
    assert(stub->is_runtime_stub(), "wrong code blob");
    return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
            (_symbol == name);
  }

  // No Java method is associated with a runtime stub.
  methodOop method() { return NULL; }

  // Section header for the runtime-stub part of the report.
  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Runtime stub");
    ProfilerNode::print_title(st);
  }

  // No oops held, so nothing to visit.
  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", ((RuntimeStub*)_stub)->name());
    print_symbol_on(st);
  }

  // Append the VM symbol name, if one was captured.
  void print_symbol_on(outputStream* st) {
    if(_symbol) {
      st->print("  (%s)", _symbol);
    }
  }
};
 557 
 558 
 559 class unknown_compiledNode : public ProfilerNode {
 560  const char *_name;
 561  public:
 562    unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
 563      if ( cb->is_buffer_blob() )
 564        _name = ((BufferBlob*)cb)->name();
 565      else
 566        _name = ((SingletonBlob*)cb)->name();
 567      update(where);
 568   }
 569   bool is_compiled()    const { return true; }
 570 
 571   bool unknown_compiled_match(const CodeBlob* cb) const {
 572      if ( cb->is_buffer_blob() )
 573        return !strcmp(((BufferBlob*)cb)->name(), _name);
 574      else
 575        return !strcmp(((SingletonBlob*)cb)->name(), _name);
 576   }
 577 
 578   methodOop method()         { return NULL; }
 579 
 580   void oops_do(OopClosure* f) {
 581     ;
 582   }
 583 
 584   void print(outputStream* st, int total_ticks) {
 585     ProfilerNode::print(st, total_ticks);
 586   }
 587 
 588   void print_method_on(outputStream* st) {
 589     st->print("%s", _name);
 590   }
 591 };
 592 
// Node for ticks taken in VM (non-Java) code, identified by the symbol
// name resolved via os-level dll lookup, or NULL when no name could be
// resolved.
class vmNode : public ProfilerNode {
 private:
  const char* _name; // "optional" name obtained by os means such as dll lookup
 public:
  vmNode(const TickPosition where) : ProfilerNode() {
    _name = NULL;
    update(where);
  }

  vmNode(const char* name, const TickPosition where) : ProfilerNode() {
    _name = name;
    update(where);
  }

  const char *name()    const { return _name; }
  bool is_compiled()    const { return true; }

  // NOTE(review): strcmp with a NULL 'name' or NULL '_name' is
  // undefined; callers appear to pass resolved names only — confirm
  // the NULL-name path (vm_update(NULL, ...)) cannot reach this.
  bool vm_match(const char* name) const { return strcmp(name, _name) == 0; }

  // No Java method is associated with VM code.
  methodOop method()          { return NULL; }

  // Shift-xor string hash; NULL names hash to 0.
  static int hash(const char* name){
    // Compute a simple hash
    const char* cp = name;
    int h = 0;

    if(name != NULL){
      while(*cp != '\0'){
        h = (h << 1) ^ *cp;
        cp++;
      }
    }
    return h;
  }

  // No oops held, so nothing to visit.
  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    if(_name==NULL){
      st->print("%s", "unknown code");
    }
    else {
      st->print("%s", _name);
    }
  }
};
 645 
 646 void ThreadProfiler::interpreted_update(methodOop method, TickPosition where) {
 647   int index = entry(ProfilerNode::hash(method));
 648   if (!table[index]) {
 649     table[index] = new (this) interpretedNode(method, where);
 650   } else {
 651     ProfilerNode* prev = table[index];
 652     for(ProfilerNode* node = prev; node; node = node->next()) {
 653       if (node->interpreted_match(method)) {
 654         node->update(where);
 655         return;
 656       }
 657       prev = node;
 658     }
 659     prev->set_next(new (this) interpretedNode(method, where));
 660   }
 661 }
 662 
 663 void ThreadProfiler::compiled_update(methodOop method, TickPosition where) {
 664   int index = entry(ProfilerNode::hash(method));
 665   if (!table[index]) {
 666     table[index] = new (this) compiledNode(method, where);
 667   } else {
 668     ProfilerNode* prev = table[index];
 669     for(ProfilerNode* node = prev; node; node = node->next()) {
 670       if (node->compiled_match(method)) {
 671         node->update(where);
 672         return;
 673       }
 674       prev = node;
 675     }
 676     prev->set_next(new (this) compiledNode(method, where));
 677   }
 678 }
 679 
// Record a tick in the native stub of 'method' (symbol 'name'): find or
// create the matching stubNode in this thread's table and bump it.
void ThreadProfiler::stub_update(methodOop method, const char* name, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) stubNode(method, name, where);
  } else {
    // Walk the chain for an existing match; 'prev' ends at the tail so
    // a miss can append a new node.
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->stub_match(method, name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) stubNode(method, name, where));
  }
}
 696 
// Record a tick in adapter code.  Adapters have no per-method identity,
// so they all share bucket 0; adapter_match() picks the single
// adapterNode out of whatever else happens to hash there.
void ThreadProfiler::adapter_update(TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) adapterNode(where);
  } else {
    // Walk the chain; 'prev' ends at the tail so a miss can append.
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->adapter_match()) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) adapterNode(where));
  }
}
 713 
// Record a tick in a runtime stub.  Like adapters, runtime stubs are
// all chained from bucket 0; runtimeStub_match selects by entry point
// and symbol name.
void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) runtimeStubNode(stub, name, where);
  } else {
    // Walk the chain; 'prev' ends at the tail so a miss can append.
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->runtimeStub_match(stub, name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) runtimeStubNode(stub, name, where));
  }
}
 730 
 731 
// Record a tick in a code blob that is neither an nmethod nor a runtime
// stub.  These also share bucket 0; unknown_compiled_match compares the
// blob's name string.
void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) unknown_compiledNode(cb, where);
  } else {
    // Walk the chain; 'prev' ends at the tail so a miss can append.
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (node->unknown_compiled_match(cb)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) unknown_compiledNode(cb, where));
  }
}
 748 
// Record a VM tick with no resolvable symbol name.
void ThreadProfiler::vm_update(TickPosition where) {
  vm_update(NULL, where);
}
 752 
// Record a VM tick against the symbol 'name' (may be NULL).  Names are
// strdup'ed on node creation because the incoming symbol may be
// resource allocated and thus shorter-lived than the node.
// NOTE(review): the cast to vmNode* below is unchecked — if a non-vm
// node ever shares this bucket, vm_match is called on the wrong type.
// The bucket index comes from vmNode::hash so collisions with the
// method-hash buckets look possible; confirm against the full class.
void ThreadProfiler::vm_update(const char* name, TickPosition where) {
  int index = entry(vmNode::hash(name));
  assert(index >= 0, "Must be positive");
  // Note that we call strdup below since the symbol may be resource allocated
  if (!table[index]) {
    table[index] = new (this) vmNode(os::strdup(name), where);
  } else {
    // Walk the chain; 'prev' ends at the tail so a miss can append.
    ProfilerNode* prev = table[index];
    for(ProfilerNode* node = prev; node; node = node->next()) {
      if (((vmNode *)node)->vm_match(name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) vmNode(os::strdup(name), where));
  }
}
 771 
 772 
// Periodic task that drives the flat profiler: task() fires every
// 'interval_time' ms and delivers one sampling tick (see definition
// further below).
class FlatProfilerTask : public PeriodicTask {
public:
  FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
  void task();
};
 778 
 779 void FlatProfiler::record_vm_operation() {
 780   if (Universe::heap()->is_gc_active()) {
 781     FlatProfiler::received_gc_ticks += 1;
 782     return;
 783   }
 784 
 785   if (DeoptimizationMarker::is_active()) {
 786     FlatProfiler::deopt_ticks += 1;
 787     return;
 788   }
 789 
 790   FlatProfiler::vm_operation_ticks += 1;
 791 }
 792 
// Sample the VMThread itself (only under +ProfileVM): snapshot its pc
// without stopping it, resolve the pc to a symbol name, and attribute
// the tick to the VM-thread profiler.  Ticks with an unresolvable pc or
// name are silently dropped here.
void FlatProfiler::record_vm_tick() {
  // Profile the VM Thread itself if needed
  // This is done without getting the Threads_lock and we can go deep
  // inside Safepoint, etc.
  if( ProfileVM  ) {
    ResourceMark rm;
    ExtendedPC epc;
    const char *name = NULL;
    char buf[256];
    buf[0] = '\0';

    vm_thread_profiler->inc_thread_ticks();

    // Get a snapshot of a current VMThread pc (and leave it running!)
    // The call may fail if, for instance the VM thread is interrupted while
    // holding the Interrupt_lock or for other reasons.
    epc = os::get_thread_pc(VMThread::vm_thread());
    if(epc.pc() != NULL) {
      if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
         name = buf;
      }
    }
    if (name != NULL) {
      vm_thread_profiler->vm_update(name, tp_native);
    }
  }
}
 820 
// Deliver one profiler tick to every engaged Java thread: externally
// suspend the threads, record a tick for each at its current frame,
// then resume them.  The suspend/record/resume sequence is
// order-sensitive; do not reorder.
void FlatProfiler::record_thread_ticks() {

  int maxthreads, suspendedthreadcount;
  JavaThread** threadsList;
  bool interval_expired = false;

  // Roll over the interval window when ProfileIntervals is on and
  // enough ticks have accumulated since the previous rollover.
  if (ProfileIntervals &&
      (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
    interval_expired = true;
    interval_ticks_previous = FlatProfiler::received_ticks;
  }

  // Try not to wait for the Threads_lock
  if (Threads_lock->try_lock()) {
    {  // Threads_lock scope
      maxthreads = Threads::number_of_threads();
      // NOTE(review): threadsList is allocated every tick and not freed
      // in this visible span — presumably freed elsewhere or leaked;
      // verify against the rest of the file.
      threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads);
      suspendedthreadcount = 0;
      for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
        if (tp->is_Compiler_thread()) {
          // Only record ticks for active compiler threads
          CompilerThread* cthread = (CompilerThread*)tp;
          if (cthread->task() != NULL) {
            // The compiler is active.  If we need to access any of the fields
            // of the compiler task we should suspend the CompilerThread first.
            FlatProfiler::compiler_ticks += 1;
            continue;
          }
        }

        // First externally suspend all threads by marking each for
        // external suspension - so it will stop at its next transition
        // Then do a safepoint
        ThreadProfiler* pp = tp->get_thread_profiler();
        if (pp != NULL && pp->engaged) {
          MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
          // Skip threads that are already suspended or on their way out.
          if (!tp->is_external_suspend() && !tp->is_exiting()) {
            tp->set_external_suspend();
            threadsList[suspendedthreadcount++] = tp;
          }
        }
      }
      Threads_lock->unlock();
    }
    // Suspend each thread. This call should just return
    // for any threads that have already self-suspended
    // Net result should be one safepoint
    for (int j = 0; j < suspendedthreadcount; j++) {
      JavaThread *tp = threadsList[j];
      if (tp) {
        tp->java_suspend();
      }
    }

    // We are responsible for resuming any thread on this list
    for (int i = 0; i < suspendedthreadcount; i++) {
      JavaThread *tp = threadsList[i];
      if (tp) {
        ThreadProfiler* pp = tp->get_thread_profiler();
        if (pp != NULL && pp->engaged) {
          HandleMark hm;
          FlatProfiler::delivered_ticks += 1;
          if (interval_expired) {
          FlatProfiler::interval_record_thread(pp);
          }
          // This is the place where we check to see if a user thread is
          // blocked waiting for compilation.
          if (tp->blocked_on_compilation()) {
            pp->compiler_ticks += 1;
            pp->interval_data_ref()->inc_compiling();
          } else {
            pp->record_tick(tp);
          }
        }
        // Resuming requires the Threads_lock (taken per thread here).
        MutexLocker ml(Threads_lock);
        tp->java_resume();
      }
    }
    // Print and reset the interval data once all threads are recorded.
    if (interval_expired) {
      FlatProfiler::interval_print();
      FlatProfiler::interval_reset();
    }
  } else {
    // Couldn't get the threads lock, just record that rather than blocking
    FlatProfiler::threads_lock_ticks += 1;
  }

}
 909 
// One profiler tick: optionally sample the VMThread, account any
// in-flight VM operation, and (unless the VM is at a safepoint, where
// Java threads are already stopped) sample the Java threads.
void FlatProfilerTask::task() {
  FlatProfiler::received_ticks += 1;

  if (ProfileVM) {
    FlatProfiler::record_vm_tick();
  }

  VM_Operation* op = VMThread::vm_operation();
  if (op != NULL) {
    FlatProfiler::record_vm_operation();
    // At a safepoint no Java thread is running, so skip thread sampling.
    if (SafepointSynchronize::is_at_safepoint()) {
      return;
    }
  }
  FlatProfiler::record_thread_ticks();
}
 926 
// Attribute a tick landing in an interpreted frame.  'ticks' is one of
// the FlatProfiler bytecode histograms (code vs. native position),
// selected by the caller.
void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
  FlatProfiler::all_int_ticks++;
  if (!FlatProfiler::full_profile()) {
    return;
  }

  if (!fr.is_interpreted_frame_valid(thread)) {
    // tick came at a bad time
    interpreter_ticks += 1;
    FlatProfiler::interpreter_ticks += 1;
    return;
  }

  // The frame has been fully validated so we can trust the method and bci

  methodOop method = *fr.interpreter_frame_method_addr();

  interpreted_update(method, where);

  // update byte code table
  InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
  if (desc != NULL && desc->bytecode() >= 0) {
    ticks[desc->bytecode()]++;
  }
}
 952 
// Attribute a tick landing in compiled code.  Runtime stubs are
// re-attributed to their sender frame as native time; the remaining
// cases dispatch on whether the blob maps to a Java method.
// Note: 'name' is never assigned in this function, so the stub and
// runtime-stub updates below always receive a NULL symbol name.
void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
  const char *name = NULL;
  TickPosition localwhere = where;

  FlatProfiler::all_comp_ticks++;
  if (!FlatProfiler::full_profile()) return;

  CodeBlob* cb = fr.cb();

// For runtime stubs, record as native rather than as compiled
   if (cb->is_runtime_stub()) {
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
        cb = fr.cb();
        localwhere = tp_native;
  }
  methodOop method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
                                          (methodOop)NULL;

  if (method == NULL) {
    // No Java method: either still a runtime stub (sender was one too)
    // or some other blob kind.
    if (cb->is_runtime_stub())
      runtime_stub_update(cb, name, localwhere);
    else
      unknown_compiled_update(cb, localwhere);
  }
  else {
    if (method->is_native()) {
      stub_update(method, name, localwhere);
    } else {
      compiled_update(method, localwhere);
    }
  }
}
 986 
 987 extern "C" void find(int x);
 988 
 989 
 990 void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
 991   // The tick happened in real code -> non VM code
 992   if (fr.is_interpreted_frame()) {
 993     interval_data_ref()->inc_interpreted();
 994     record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
 995     return;
 996   }
 997 
 998   if (CodeCache::contains(fr.pc())) {
 999     interval_data_ref()->inc_compiled();
1000     PCRecorder::record(fr.pc());
1001     record_compiled_tick(thread, fr, tp_code);
1002     return;
1003   }
1004 
1005   if (VtableStubs::stub_containing(fr.pc()) != NULL) {
1006     unknown_ticks_array[ut_vtable_stubs] += 1;
1007     return;
1008   }
1009 
1010   frame caller = fr.profile_find_Java_sender_frame(thread);
1011 
1012   if (caller.sp() != NULL && caller.pc() != NULL) {
1013     record_tick_for_calling_frame(thread, caller);
1014     return;
1015   }
1016 
1017   unknown_ticks_array[ut_running_frame] += 1;
1018   FlatProfiler::unknown_ticks += 1;
1019 }
1020 
1021 void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
1022   // The tick happened in VM code
1023   interval_data_ref()->inc_native();
1024   if (fr.is_interpreted_frame()) {
1025     record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
1026     return;
1027   }
1028   if (CodeCache::contains(fr.pc())) {
1029     record_compiled_tick(thread, fr, tp_native);
1030     return;
1031   }
1032 
1033   frame caller = fr.profile_find_Java_sender_frame(thread);
1034 
1035   if (caller.sp() != NULL && caller.pc() != NULL) {
1036     record_tick_for_calling_frame(thread, caller);
1037     return;
1038   }
1039 
1040   unknown_ticks_array[ut_calling_frame] += 1;
1041   FlatProfiler::unknown_ticks += 1;
1042 }
1043 
// Attribute one timer tick to this thread.  The tick is classified first by
// profiler region markers (class loader / extra), then — after waiting for
// any external suspend request to complete — by the thread's current state
// and the frame it is executing.
void ThreadProfiler::record_tick(JavaThread* thread) {
  FlatProfiler::all_ticks++;
  thread_ticks += 1;

  // Here's another way to track global state changes.
  // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader
  // and we check that here.
  // This is more direct, and more than one thread can be in the class loader at a time,
  // but it does mean the class loader has to know about the profiler.
  if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
    class_loader_ticks += 1;
    FlatProfiler::class_loader_ticks += 1;
    return;
  } else if (region_flag[ThreadProfilerMark::extraRegion]) {
    extra_ticks += 1;
    FlatProfiler::extra_ticks += 1;
    return;
  }
  // Note that the WatcherThread can now stop for safepoints
  uint32_t debug_bits = 0;
  if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
      SuspendRetryDelay, &debug_bits)) {
    // Could not establish a stable suspended state; the stack is not safe
    // to inspect, so record the tick as unknown.
    unknown_ticks_array[ut_unknown_thread_state] += 1;
    FlatProfiler::unknown_ticks += 1;
    return;
  }

  frame fr;

  switch (thread->thread_state()) {
  // Thread is in VM or native code: the tick belongs to the Java caller.
  case _thread_in_native:
  case _thread_in_native_trans:
  case _thread_in_vm:
  case _thread_in_vm_trans:
    if (thread->profile_last_Java_frame(&fr)) {
      if (fr.is_runtime_frame()) {
        // Skip the runtime stub itself; start classification at its sender.
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
      }
      record_tick_for_calling_frame(thread, fr);
    } else {
      unknown_ticks_array[ut_no_last_Java_frame] += 1;
      FlatProfiler::unknown_ticks += 1;
    }
    break;
  // handle_special_runtime_exit_condition self-suspends threads in Java
  case _thread_in_Java:
  case _thread_in_Java_trans:
    if (thread->profile_last_Java_frame(&fr)) {
      if (fr.is_safepoint_blob_frame()) {
        // Skip the safepoint blob; charge the frame it interrupted.
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
      }
      record_tick_for_running_frame(thread, fr);
    } else {
      unknown_ticks_array[ut_no_last_Java_frame] += 1;
      FlatProfiler::unknown_ticks += 1;
    }
    break;
  case _thread_blocked:
  case _thread_blocked_trans:
    // A "blocked" Java state whose OS thread is still RUNNABLE is really
    // doing work (e.g. spinning), so try to attribute the tick to a frame;
    // otherwise count it as genuinely blocked time.
    if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
        if (thread->profile_last_Java_frame(&fr)) {
          if (fr.is_safepoint_blob_frame()) {
            RegisterMap map(thread, false);
            fr = fr.sender(&map);
            record_tick_for_running_frame(thread, fr);
          } else {
            record_tick_for_calling_frame(thread, fr);
          }
        } else {
          unknown_ticks_array[ut_no_last_Java_frame] += 1;
          FlatProfiler::unknown_ticks += 1;
        }
    } else {
          blocked_ticks += 1;
          FlatProfiler::blocked_ticks += 1;
    }
    break;
  case _thread_uninitialized:
  case _thread_new:
  // not used, included for completeness
  case _thread_new_trans:
     // Thread has no Java frames yet; nothing to attribute.
     unknown_ticks_array[ut_no_last_Java_frame] += 1;
     FlatProfiler::unknown_ticks += 1;
     break;
  default:
    unknown_ticks_array[ut_unknown_thread_state] += 1;
    FlatProfiler::unknown_ticks += 1;
    break;
  }
  return;
}
1137 
// Start accepting ticks for this thread and begin measuring elapsed time.
// NOTE(review): the flag is set before the timer starts; keep that order —
// the sampler may observe 'engaged' concurrently.
void ThreadProfiler::engage() {
  engaged = true;
  timer.start();
}
1142 
// Stop accepting ticks for this thread and stop the elapsed-time timer.
void ThreadProfiler::disengage() {
  engaged = false;
  timer.stop();
}
1147 
1148 void ThreadProfiler::initialize() {
1149   for (int index = 0; index < table_size; index++) {
1150     table[index] = NULL;
1151   }
1152   thread_ticks = 0;
1153   blocked_ticks = 0;
1154   compiler_ticks = 0;
1155   interpreter_ticks = 0;
1156   for (int ut = 0; ut < ut_end; ut += 1) {
1157     unknown_ticks_array[ut] = 0;
1158   }
1159   region_flag[ThreadProfilerMark::classLoaderRegion] = false;
1160   class_loader_ticks = 0;
1161   region_flag[ThreadProfilerMark::extraRegion] = false;
1162   extra_ticks = 0;
1163   timer.start();
1164   interval_data_ref()->reset();
1165 }
1166 
1167 void ThreadProfiler::reset() {
1168   timer.stop();
1169   if (table != NULL) {
1170     for (int index = 0; index < table_size; index++) {
1171       ProfilerNode* n = table[index];
1172       if (n != NULL) {
1173         delete n;
1174       }
1175     }
1176   }
1177   initialize();
1178 }
1179 
1180 void FlatProfiler::allocate_table() {
1181   { // Bytecode table
1182     bytecode_ticks      = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
1183     bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes);
1184     for(int index = 0; index < Bytecodes::number_of_codes; index++) {
1185       bytecode_ticks[index]      = 0;
1186       bytecode_ticks_stub[index] = 0;
1187     }
1188   }
1189 
1190   if (ProfilerRecordPC) PCRecorder::init();
1191 
1192   interval_data         = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size);
1193   FlatProfiler::interval_reset();
1194 }
1195 
1196 void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
1197   full_profile_flag = fullProfile;
1198   if (bytecode_ticks == NULL) {
1199     allocate_table();
1200   }
1201   if(ProfileVM && (vm_thread_profiler == NULL)){
1202     vm_thread_profiler = new ThreadProfiler();
1203   }
1204   if (task == NULL) {
1205     task = new FlatProfilerTask(WatcherThread::delay_interval);
1206     task->enroll();
1207   }
1208   timer.start();
1209   if (mainThread != NULL) {
1210     // When mainThread was created, it might not have a ThreadProfiler
1211     ThreadProfiler* pp = mainThread->get_thread_profiler();
1212     if (pp == NULL) {
1213       mainThread->set_thread_profiler(new ThreadProfiler());
1214     } else {
1215       pp->reset();
1216     }
1217     mainThread->get_thread_profiler()->engage();
1218   }
1219   // This is where we would assign thread_profiler
1220   // if we wanted only one thread_profiler for all threads.
1221   thread_profiler = NULL;
1222 }
1223 
1224 void FlatProfiler::disengage() {
1225   if (!task) {
1226     return;
1227   }
1228   timer.stop();
1229   task->disenroll();
1230   delete task;
1231   task = NULL;
1232   if (thread_profiler != NULL) {
1233     thread_profiler->disengage();
1234   } else {
1235     MutexLocker tl(Threads_lock);
1236     for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
1237       ThreadProfiler* pp = tp->get_thread_profiler();
1238       if (pp != NULL) {
1239         pp->disengage();
1240       }
1241     }
1242   }
1243 }
1244 
1245 void FlatProfiler::reset() {
1246   if (task) {
1247     disengage();
1248   }
1249 
1250   class_loader_ticks = 0;
1251   extra_ticks        = 0;
1252   received_gc_ticks  = 0;
1253   vm_operation_ticks = 0;
1254   compiler_ticks     = 0;
1255   deopt_ticks        = 0;
1256   interpreter_ticks  = 0;
1257   blocked_ticks      = 0;
1258   unknown_ticks      = 0;
1259   received_ticks     = 0;
1260   delivered_ticks    = 0;
1261   timer.stop();
1262 }
1263 
// The profiler is active exactly while its periodic sampling task is
// enrolled with the WatcherThread.
bool FlatProfiler::is_active() {
  return task != NULL;
}
1267 
1268 void FlatProfiler::print_byte_code_statistics() {
1269   GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
1270 
1271   tty->print_cr(" Bytecode ticks:");
1272   for (int index = 0; index < Bytecodes::number_of_codes; index++) {
1273     if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
1274       tty->print_cr("  %4d %4d = %s",
1275         FlatProfiler::bytecode_ticks[index],
1276         FlatProfiler::bytecode_ticks_stub[index],
1277         Bytecodes::name( (Bytecodes::Code) index));
1278     }
1279   }
1280   tty->cr();
1281 }
1282 
1283 void print_ticks(const char* title, int ticks, int total) {
1284   if (ticks > 0) {
1285     tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
1286     tty->fill_to(col3);
1287     tty->print("%s", title);
1288     tty->cr();
1289   }
1290 }
1291 
// Print this thread's flat profile: sorted interpreted, compiled, stub and
// runtime-stub sections, then thread-local bookkeeping ticks.  Takes
// ProfilePrint_lock so concurrent threads' reports do not interleave, and
// resets the profiler when done.
void ThreadProfiler::print(const char* thread_name) {
  ResourceMark rm;
  MutexLocker ppl(ProfilePrint_lock);
  int index = 0; // Declared outside for loops for portability

  if (table == NULL) {
    return;
  }

  if (thread_ticks <= 0) {
    return;
  }

  const char* title = "too soon to tell";
  double secs = timer.seconds();

  // Collect every ProfilerNode from the hash table and sort by tick count.
  GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
  for(index = 0; index < table_size; index++) {
    for(ProfilerNode* node = table[index]; node; node = node->next())
      array->append(node);
  }

  array->sort(&ProfilerNode::compare);

  // compute total (sanity check)
  int active =
    class_loader_ticks +
    compiler_ticks +
    interpreter_ticks +
    unknown_ticks();
  for (index = 0; index < array->length(); index++) {
    active += array->at(index)->ticks.total();
  }
  int total = active + blocked_ticks;

  tty->cr();
  tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
  if (total != thread_ticks) {
    // Some received ticks could not be attributed anywhere above.
    print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
  }
  tty->cr();

  // print interpreted methods
  // (titles and totals are only printed when the section has any ticks;
  // at most ProfilerNumberOfInterpretedMethods rows are shown)
  tick_counter interpreted_ticks;
  bool has_interpreted_ticks = false;
  int print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_interpreted()) {
      interpreted_ticks.add(&n->ticks);
      if (!has_interpreted_ticks) {
        interpretedNode::print_title(tty);
        has_interpreted_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfInterpretedMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_interpreted_ticks) {
    if (print_count <= ProfilerNumberOfInterpretedMethods) {
      title = "Total interpreted";
    } else {
      title = "Total interpreted (including elided)";
    }
    interpretedNode::print_total(tty, &interpreted_ticks, active, title);
    tty->cr();
  }

  // print compiled methods
  tick_counter compiled_ticks;
  bool has_compiled_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_compiled()) {
      compiled_ticks.add(&n->ticks);
      if (!has_compiled_ticks) {
        compiledNode::print_title(tty);
        has_compiled_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfCompiledMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_compiled_ticks) {
    if (print_count <= ProfilerNumberOfCompiledMethods) {
      title = "Total compiled";
    } else {
      title = "Total compiled (including elided)";
    }
    compiledNode::print_total(tty, &compiled_ticks, active, title);
    tty->cr();
  }

  // print stub methods
  tick_counter stub_ticks;
  bool has_stub_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_stub()) {
      stub_ticks.add(&n->ticks);
      if (!has_stub_ticks) {
        stubNode::print_title(tty);
        has_stub_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfStubMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_stub_ticks) {
    if (print_count <= ProfilerNumberOfStubMethods) {
      title = "Total stub";
    } else {
      title = "Total stub (including elided)";
    }
    stubNode::print_total(tty, &stub_ticks, active, title);
    tty->cr();
  }

  // print runtime stubs
  tick_counter runtime_stub_ticks;
  bool has_runtime_stub_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_runtime_stub()) {
      runtime_stub_ticks.add(&n->ticks);
      if (!has_runtime_stub_ticks) {
        runtimeStubNode::print_title(tty);
        has_runtime_stub_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfRuntimeStubNodes) {
        n->print(tty, active);
      }
    }
  }
  if (has_runtime_stub_ticks) {
    if (print_count <= ProfilerNumberOfRuntimeStubNodes) {
      title = "Total runtime stubs";
    } else {
      title = "Total runtime stubs (including elided)";
    }
    runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title);
    tty->cr();
  }

  // Bookkeeping ticks that were never attributed to a specific method.
  if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) {
    tty->fill_to(col1);
    tty->print_cr("Thread-local ticks:");
    print_ticks("Blocked (of total)",  blocked_ticks,      total);
    print_ticks("Class loader",        class_loader_ticks, active);
    print_ticks("Extra",               extra_ticks,        active);
    print_ticks("Interpreter",         interpreter_ticks,  active);
    print_ticks("Compilation",         compiler_ticks,     active);
    print_ticks("Unknown: vtable stubs",  unknown_ticks_array[ut_vtable_stubs],         active);
    print_ticks("Unknown: null method",   unknown_ticks_array[ut_null_method],          active);
    print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame],        active);
    print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame],        active);
    print_ticks("Unknown: no pc",         unknown_ticks_array[ut_no_pc],                active);
    print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame],   active);
    print_ticks("Unknown: thread_state",  unknown_ticks_array[ut_unknown_thread_state], active);
    tty->cr();
  }

  if (WizardMode) {
    tty->print_cr("Node area used: %dKb", (area_top - area_bottom) / 1024);
  }
  // Printing consumes the data: start a fresh profile for this thread.
  reset();
}
1465 
1466 /*
1467 ThreadProfiler::print_unknown(){
1468   if (table == NULL) {
1469     return;
1470   }
1471 
1472   if (thread_ticks <= 0) {
1473     return;
1474   }
1475 } */
1476 
1477 void FlatProfiler::print(int unused) {
1478   ResourceMark rm;
1479   if (thread_profiler != NULL) {
1480     thread_profiler->print("All threads");
1481   } else {
1482     MutexLocker tl(Threads_lock);
1483     for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
1484       ThreadProfiler* pp = tp->get_thread_profiler();
1485       if (pp != NULL) {
1486         pp->print(tp->get_thread_name());
1487       }
1488     }
1489   }
1490 
1491   if (ProfilerPrintByteCodeStatistics) {
1492     print_byte_code_statistics();
1493   }
1494 
1495   if (non_method_ticks() > 0) {
1496     tty->cr();
1497     tty->print_cr("Global summary of %3.2f seconds:", timer.seconds());
1498     print_ticks("Received ticks",      received_ticks,     received_ticks);
1499     print_ticks("Received GC ticks",   received_gc_ticks,  received_ticks);
1500     print_ticks("Compilation",         compiler_ticks,     received_ticks);
1501     print_ticks("Deoptimization",      deopt_ticks,        received_ticks);
1502     print_ticks("Other VM operations", vm_operation_ticks, received_ticks);
1503 #ifndef PRODUCT
1504     print_ticks("Blocked ticks",       blocked_ticks,      received_ticks);
1505     print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks);
1506     print_ticks("Delivered ticks",     delivered_ticks,    received_ticks);
1507     print_ticks("All ticks",           all_ticks,          received_ticks);
1508 #endif
1509     print_ticks("Class loader",        class_loader_ticks, received_ticks);
1510     print_ticks("Extra       ",        extra_ticks,        received_ticks);
1511     print_ticks("Interpreter",         interpreter_ticks,  received_ticks);
1512     print_ticks("Unknown code",        unknown_ticks,      received_ticks);
1513   }
1514 
1515   PCRecorder::print();
1516 
1517   if(ProfileVM){
1518     tty->cr();
1519     vm_thread_profiler->print("VM Thread");
1520   }
1521 }
1522 
// Print the column legend for interval data rows:
// interpreted/compiled/native/compiling.
void IntervalData::print_header(outputStream* st) {
  st->print("i/c/n/g");
}
1526 
1527 void IntervalData::print_data(outputStream* st) {
1528   st->print("%d/%d/%d/%d", interpreted(), compiled(), native(), compiling());
1529 }
1530 
1531 void FlatProfiler::interval_record_thread(ThreadProfiler* tp) {
1532   IntervalData id = tp->interval_data();
1533   int total = id.total();
1534   tp->interval_data_ref()->reset();
1535 
1536   // Insertion sort the data, if it's relevant.
1537   for (int i = 0; i < interval_print_size; i += 1) {
1538     if (total > interval_data[i].total()) {
1539       for (int j = interval_print_size - 1; j > i; j -= 1) {
1540         interval_data[j] = interval_data[j-1];
1541       }
1542       interval_data[i] = id;
1543       break;
1544     }
1545   }
1546 }
1547 
1548 void FlatProfiler::interval_print() {
1549   if ((interval_data[0].total() > 0)) {
1550     tty->stamp();
1551     tty->print("\t");
1552     IntervalData::print_header(tty);
1553     for (int i = 0; i < interval_print_size; i += 1) {
1554       if (interval_data[i].total() > 0) {
1555         tty->print("\t");
1556         interval_data[i].print_data(tty);
1557       }
1558     }
1559     tty->cr();
1560   }
1561 }
1562 
1563 void FlatProfiler::interval_reset() {
1564   for (int i = 0; i < interval_print_size; i += 1) {
1565     interval_data[i].reset();
1566   }
1567 }
1568 
1569 void ThreadProfiler::oops_do(OopClosure* f) {
1570   if (table == NULL) return;
1571 
1572   for(int index = 0; index < table_size; index++) {
1573     for(ProfilerNode* node = table[index]; node; node = node->next())
1574       node->oops_do(f);
1575   }
1576 }
1577 
1578 void FlatProfiler::oops_do(OopClosure* f) {
1579   if (thread_profiler != NULL) {
1580     thread_profiler->oops_do(f);
1581   } else {
1582     for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
1583       ThreadProfiler* pp = tp->get_thread_profiler();
1584       if (pp != NULL) {
1585         pp->oops_do(f);
1586       }
1587     }
1588   }
1589 }