1 /*
   2  * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "runtime/atomic.hpp"
  27 #include "runtime/interfaceSupport.hpp"
  28 #include "runtime/mutexLocker.hpp"
  29 #include "runtime/safepoint.hpp"
  30 #include "runtime/threadCritical.hpp"
  31 #include "services/memPtr.hpp"
  32 #include "services/memReporter.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/decoder.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
// Whether NMT should capture a native call stack at tracking sites;
// set true only at NMT_detail level when the platform can walk stacks.
bool NMT_track_callsite = false;
  38 
  39 // walk all 'known' threads at NMT sync point, and collect their recorders
  40 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  41   assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  42   if (thread->is_Java_thread()) {
  43     JavaThread* javaThread = (JavaThread*)thread;
  44     MemRecorder* recorder = javaThread->get_recorder();
  45     if (recorder != NULL) {
  46       MemTracker::enqueue_pending_recorder(recorder);
  47       javaThread->set_recorder(NULL);
  48     }
  49   }
  50   _thread_count ++;
  51 }
  52 
  53 
// recorder shared by threads that can not use a per-thread recorder
// (writes to it are guarded by ThreadCritical)
MemRecorder*                    MemTracker::_global_recorder = NULL;
// merged view of all memory records; the source of all query results
MemSnapshot*                    MemTracker::_snapshot = NULL;
// baseline captured on demand, diffed against later snapshots
MemBaseline                     MemTracker::_baseline;
// serializes queries (baseline/print/compare) against snapshot teardown
Mutex*                          MemTracker::_query_lock = NULL;
// lock-free stack of full recorders waiting to be merged
volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
// lock-free stack of recycled recorders available for reuse
volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
// background thread that consumes pending recorders at sync points
MemTrackWorker*                 MemTracker::_worker_thread = NULL;
// consecutive safepoints at which sync() chose to skip the sync point
int                             MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
// initial estimate; replaced by the real count at each sync point
int                             MemTracker::_thread_count = 255;
// recorders currently pooled; bounds pool growth in release_thread_recorder
volatile jint                   MemTracker::_pooled_recorder_count = 0;
debug_only(intx                 MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
  69 
  70 void MemTracker::init_tracking_options(const char* option_line) {
  71   _tracking_level = NMT_off;
  72   if (strcmp(option_line, "=summary") == 0) {
  73     _tracking_level = NMT_summary;
  74   } else if (strcmp(option_line, "=detail") == 0) {
  75     _tracking_level = NMT_detail;
  76   } else if (strcmp(option_line, "=off") != 0) {
  77     vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  78   }
  79 }
  80 
  81 // first phase of bootstrapping, when VM is still in single-threaded mode.
  82 void MemTracker::bootstrap_single_thread() {
  83   if (_tracking_level > NMT_off) {
  84     assert(_state == NMT_uninited, "wrong state");
  85 
  86     // NMT is not supported with UseMallocOnly is on. NMT can NOT
  87     // handle the amount of malloc data without significantly impacting
  88     // runtime performance when this flag is on.
  89     if (UseMallocOnly) {
  90       shutdown(NMT_use_malloc_only);
  91       return;
  92     }
  93 
  94     _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
  95     if (_query_lock == NULL) {
  96       shutdown(NMT_out_of_memory);
  97       return;
  98     }
  99 
 100     debug_only(_main_thread_tid = os::current_thread_id();)
 101     _state = NMT_bootstrapping_single_thread;
 102     NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
 103   }
 104 }
 105 
 106 // second phase of bootstrapping, when VM is about to or already entered multi-theaded mode.
 107 void MemTracker::bootstrap_multi_thread() {
 108   if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
 109   // create nmt lock for multi-thread execution
 110     assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
 111     _state = NMT_bootstrapping_multi_thread;
 112     NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
 113   }
 114 }
 115 
// fully start nmt: allocate the shared snapshot and launch the worker
// thread, then move the state machine to NMT_started
void MemTracker::start() {
  // Native memory tracking is off from command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  // starting is only legal from the startup thread, after multi-thread
  // bootstrapping completed
  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL && !_snapshot->out_of_memory()) {
    if (start_worker()) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }
  }

  // fail to start native memory tracking, shut it down
  // (a partially constructed _snapshot is reclaimed later in final_shutdown)
  shutdown(NMT_initialization);
}
 136 
 137 /**
 138  * Shutting down native memory tracking.
 139  * We can not shutdown native memory tracking immediately, so we just
 140  * setup shutdown pending flag, every native memory tracking component
 141  * should orderly shut itself down.
 142  *
 143  * The shutdown sequences:
 144  *  1. MemTracker::shutdown() sets MemTracker to shutdown pending state
 145  *  2. Worker thread calls MemTracker::final_shutdown(), which transites
 146  *     MemTracker to final shutdown state.
 147  *  3. At sync point, MemTracker does final cleanup, before sets memory
 148  *     tracking level to off to complete shutdown.
 149  */
 150 void MemTracker::shutdown(ShutdownReason reason) {
 151   if (_tracking_level == NMT_off) return;
 152 
 153   if (_state <= NMT_bootstrapping_single_thread) {
 154     // we still in single thread mode, there is not contention
 155     _state = NMT_shutdown_pending;
 156     _reason = reason;
 157   } else {
 158     // we want to know who initialized shutdown
 159     if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
 160                                        (jint*)&_state, (jint)NMT_started)) {
 161         _reason = reason;
 162     }
 163   }
 164 }
 165 
// final phase of shutdown: release recorders, query data, the decoder and
// (when called from the worker) the worker thread itself
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results; hold _query_lock so no query runs while
    // they are torn down
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete worker inside the thread critical section: detach it
    // here, free it after the section is released.
    // NOTE(review): the worker is only detached when the current thread IS
    // the worker thread itself -- confirm other callers can not leak it.
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}
 200 
// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders: atomically detach the whole pool by swinging
  // the head to NULL, retrying the CAS if another thread changed the head
  volatile MemRecorder* cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      // deleting the head releases the rest of the chain linked via next()
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}
 217 
 218 // delete all recorders in pending queue
 219 void MemTracker::delete_all_pending_recorders() {
 220   // free all pending recorders
 221   MemRecorder* pending_head = get_pending_recorders();
 222   if (pending_head != NULL) {
 223     delete pending_head;
 224   }
 225 }
 226 
 227 /*
 228  * retrieve per-thread recorder of specified thread.
 229  * if thread == NULL, it means global recorder
 230  */
 231 MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
 232   if (shutdown_in_progress()) return NULL;
 233 
 234   MemRecorder* rc;
 235   if (thread == NULL) {
 236     rc = _global_recorder;
 237   } else {
 238     rc = thread->get_recorder();
 239   }
 240 
 241   if (rc != NULL && rc->is_full()) {
 242     enqueue_pending_recorder(rc);
 243     rc = NULL;
 244   }
 245 
 246   if (rc == NULL) {
 247     rc = get_new_or_pooled_instance();
 248     if (thread == NULL) {
 249       _global_recorder = rc;
 250     } else {
 251       thread->set_recorder(rc);
 252     }
 253   }
 254   return rc;
 255 }
 256 
 257 /*
 258  * get a per-thread recorder from pool, or create a new one if
 259  * there is not one available.
 260  */
 261 MemRecorder* MemTracker::get_new_or_pooled_instance() {
 262    MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
 263    if (cur_head == NULL) {
 264      MemRecorder* rec = new (std::nothrow)MemRecorder();
 265      if (rec == NULL || rec->out_of_memory()) {
 266        shutdown(NMT_out_of_memory);
 267        if (rec != NULL) {
 268          delete rec;
 269          rec = NULL;
 270        }
 271      }
 272      return rec;
 273    } else {
 274      MemRecorder* next_head = cur_head->next();
 275      if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
 276        (void*)cur_head)) {
 277        return get_new_or_pooled_instance();
 278      }
 279      cur_head->set_next(NULL);
 280      Atomic::dec(&_pooled_recorder_count);
 281      debug_only(cur_head->set_generation();)
 282      return cur_head;
 283   }
 284 }
 285 
 286 /*
 287  * retrieve all recorders in pending queue, and empty the queue
 288  */
 289 MemRecorder* MemTracker::get_pending_recorders() {
 290   MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 291   MemRecorder* null_ptr = NULL;
 292   while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
 293     (void*)cur_head)) {
 294     cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 295   }
 296   NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
 297   return cur_head;
 298 }
 299 
 300 /*
 301  * release a recorder to recorder pool.
 302  */
 303 void MemTracker::release_thread_recorder(MemRecorder* rec) {
 304   assert(rec != NULL, "null recorder");
 305   // we don't want to pool too many recorders
 306   rec->set_next(NULL);
 307   if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
 308     delete rec;
 309     return;
 310   }
 311 
 312   rec->clear();
 313   MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
 314   rec->set_next(cur_head);
 315   while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
 316     (void*)cur_head)) {
 317     cur_head = const_cast<MemRecorder*>(_pooled_recorders);
 318     rec->set_next(cur_head);
 319   }
 320   Atomic::inc(&_pooled_recorder_count);
 321 }
 322 
 323 /*
 324  * This is the most important method in whole nmt implementation.
 325  *
 326  * Create a memory record.
 327  * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
 328  *    still in single thread mode.
 329  * 2. For all threads other than JavaThread, ThreadCritical is needed
 330  *    to write to recorders to global recorder.
 331  * 3. For JavaThreads that are not longer visible by safepoint, also
 332  *    need to take ThreadCritical and records are written to global
 333  *    recorders, since these threads are NOT walked by Threads.do_thread().
 334  * 4. JavaThreads that are running in native state, have to transition
 335  *    to VM state before writing to per-thread recorders.
 336  * 5. JavaThreads that are running in VM state do not need any lock and
 337  *    records are written to per-thread recorders.
 338  * 6. For a thread has yet to attach VM 'Thread', they need to take
 339  *    ThreadCritical to write to global recorder.
 340  *
 341  *    Important note:
 342  *    NO LOCK should be taken inside ThreadCritical lock !!!
 343  */
 344 void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
 345     size_t size, address pc, Thread* thread) {
 346   assert(addr != NULL, "Sanity check");
 347   if (!shutdown_in_progress()) {
 348     // single thread, we just write records direct to global recorder,'
 349     // with any lock
 350     if (_state == NMT_bootstrapping_single_thread) {
 351       assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
 352       thread = NULL;
 353     } else {
 354       if (thread == NULL) {
 355           // don't use Thread::current(), since it is possible that
 356           // the calling thread has yet to attach to VM 'Thread',
 357           // which will result assertion failure
 358           thread = ThreadLocalStorage::thread();
 359       }
 360     }
 361 
 362     if (thread != NULL) {
 363       if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
 364         JavaThread*      java_thread = (JavaThread*)thread;
 365         JavaThreadState  state = java_thread->thread_state();
 366         if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
 367           // JavaThreads that are safepoint safe, can run through safepoint,
 368           // so ThreadCritical is needed to ensure no threads at safepoint create
 369           // new records while the records are being gathered and the sequence number is changing
 370           ThreadCritical tc;
 371           create_record_in_recorder(addr, flags, size, pc, java_thread);
 372         } else {
 373           create_record_in_recorder(addr, flags, size, pc, java_thread);
 374         }
 375       } else {
 376         // other threads, such as worker and watcher threads, etc. need to
 377         // take ThreadCritical to write to global recorder
 378         ThreadCritical tc;
 379         create_record_in_recorder(addr, flags, size, pc, NULL);
 380       }
 381     } else {
 382       if (_state == NMT_bootstrapping_single_thread) {
 383         // single thread, no lock needed
 384         create_record_in_recorder(addr, flags, size, pc, NULL);
 385       } else {
 386         // for thread has yet to attach VM 'Thread', we can not use VM mutex.
 387         // use native thread critical instead
 388         ThreadCritical tc;
 389         create_record_in_recorder(addr, flags, size, pc, NULL);
 390       }
 391     }
 392   }
 393 }
 394 
 395 // write a record to proper recorder. No lock can be taken from this method
 396 // down.
 397 void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
 398     size_t size, address pc, JavaThread* thread) {
 399 
 400     MemRecorder* rc = get_thread_recorder(thread);
 401     if (rc != NULL) {
 402       rc->record(addr, flags, size, pc);
 403     }
 404 }
 405 
 406 /**
 407  * enqueue a recorder to pending queue
 408  */
 409 void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
 410   assert(rec != NULL, "null recorder");
 411 
 412   // we are shutting down, so just delete it
 413   if (shutdown_in_progress()) {
 414     rec->set_next(NULL);
 415     delete rec;
 416     return;
 417   }
 418 
 419   MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 420   rec->set_next(cur_head);
 421   while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
 422     (void*)cur_head)) {
 423     cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
 424     rec->set_next(cur_head);
 425   }
 426   NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
 427 }
 428 
 429 /*
 430  * The method is called at global safepoint
 431  * during it synchronization process.
 432  *   1. enqueue all JavaThreads' per-thread recorders
 433  *   2. enqueue global recorder
 434  *   3. retrieve all pending recorders
 435  *   4. reset global sequence number generator
 436  *   5. call worker's sync
 437  */
 438 #define MAX_SAFEPOINTS_TO_SKIP     128
 439 #define SAFE_SEQUENCE_THRESHOLD    30
 440 #define HIGH_GENERATION_THRESHOLD  60
 441 
 442 void MemTracker::sync() {
 443   assert(_tracking_level > NMT_off, "NMT is not enabled");
 444   assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
 445 
 446   // Some GC tests hit large number of safepoints in short period of time
 447   // without meaningful activities. We should prevent going to
 448   // sync point in these cases, which can potentially exhaust generation buffer.
 449   // Here is the factots to determine if we should go into sync point:
 450   // 1. not to overflow sequence number
 451   // 2. if we are in danger to overflow generation buffer
 452   // 3. how many safepoints we already skipped sync point
 453   if (_state == NMT_started) {
 454     // worker thread is not ready, no one can manage generation
 455     // buffer, so skip this safepoint
 456     if (_worker_thread == NULL) return;
 457 
 458     if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
 459       int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
 460       int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
 461       if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
 462         _sync_point_skip_count ++;
 463         return;
 464       }
 465     }
 466     _sync_point_skip_count = 0;
 467     {
 468       // This method is running at safepoint, with ThreadCritical lock,
 469       // it should guarantee that NMT is fully sync-ed.
 470       ThreadCritical tc;
 471 
 472       SequenceGenerator::reset();
 473 
 474       // walk all JavaThreads to collect recorders
 475       SyncThreadRecorderClosure stc;
 476       Threads::threads_do(&stc);
 477 
 478       _thread_count = stc.get_thread_count();
 479       MemRecorder* pending_recorders = get_pending_recorders();
 480 
 481       if (_global_recorder != NULL) {
 482         _global_recorder->set_next(pending_recorders);
 483         pending_recorders = _global_recorder;
 484         _global_recorder = NULL;
 485       }
 486       // check _worker_thread with lock to avoid racing condition
 487       if (_worker_thread != NULL) {
 488         _worker_thread->at_sync_point(pending_recorders);
 489       }
 490 
 491       assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
 492     }
 493   }
 494 
 495   // now, it is the time to shut whole things off
 496   if (_state == NMT_final_shutdown) {
 497     // walk all JavaThreads to delete all recorders
 498     SyncThreadRecorderClosure stc;
 499     Threads::threads_do(&stc);
 500     // delete global recorder
 501     {
 502       ThreadCritical tc;
 503       if (_global_recorder != NULL) {
 504         delete _global_recorder;
 505         _global_recorder = NULL;
 506       }
 507     }
 508     MemRecorder* pending_recorders = get_pending_recorders();
 509     if (pending_recorders != NULL) {
 510       delete pending_recorders;
 511     }
 512     // try at a later sync point to ensure MemRecorder instance drops to zero to
 513     // completely shutdown NMT
 514     if (MemRecorder::_instance_count == 0) {
 515       _state = NMT_shutdown;
 516       _tracking_level = NMT_off;
 517     }
 518   }
 519 }
 520 
 521 /*
 522  * Start worker thread.
 523  */
 524 bool MemTracker::start_worker() {
 525   assert(_worker_thread == NULL, "Just Check");
 526   _worker_thread = new (std::nothrow) MemTrackWorker();
 527   if (_worker_thread == NULL || _worker_thread->has_error()) {
 528     shutdown(NMT_initialization);
 529     return false;
 530   }
 531   _worker_thread->start();
 532   return true;
 533 }
 534 
 535 /*
 536  * We need to collect a JavaThread's per-thread recorder
 537  * before it exits.
 538  */
 539 void MemTracker::thread_exiting(JavaThread* thread) {
 540   if (is_on()) {
 541     MemRecorder* rec = thread->get_recorder();
 542     if (rec != NULL) {
 543       enqueue_pending_recorder(rec);
 544       thread->set_recorder(NULL);
 545     }
 546   }
 547 }
 548 
 549 // baseline current memory snapshot
 550 bool MemTracker::baseline() {
 551   MutexLockerEx lock(_query_lock, true);
 552   MemSnapshot* snapshot = get_snapshot();
 553   if (snapshot != NULL) {
 554     return _baseline.baseline(*snapshot, false);
 555   }
 556   return false;
 557 }
 558 
 559 // print memory usage from current snapshot
 560 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 561   MemBaseline  baseline;
 562   MutexLockerEx lock(_query_lock, true);
 563   MemSnapshot* snapshot = get_snapshot();
 564   if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 565     BaselineReporter reporter(out, unit);
 566     reporter.report_baseline(baseline, summary_only);
 567     return true;
 568   }
 569   return false;
 570 }
 571 
 572 // compare memory usage between current snapshot and baseline
 573 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 574   MutexLockerEx lock(_query_lock, true);
 575   if (_baseline.baselined()) {
 576     MemBaseline baseline;
 577     MemSnapshot* snapshot = get_snapshot();
 578     if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 579       BaselineReporter reporter(out, unit);
 580       reporter.diff_baselines(baseline, _baseline, summary_only);
 581       return true;
 582     }
 583   }
 584   return false;
 585 }
 586 
 587 #ifndef PRODUCT
 588 void MemTracker::walk_stack(int toSkip, char* buf, int len) {
 589   int cur_len = 0;
 590   char tmp[1024];
 591   address pc;
 592 
 593   while (cur_len < len) {
 594     pc = os::get_caller_pc(toSkip + 1);
 595     if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
 596       jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
 597       cur_len = (int)strlen(buf);
 598     } else {
 599       buf[cur_len] = '\0';
 600       break;
 601     }
 602     toSkip ++;
 603   }
 604 }
 605 
 606 void MemTracker::print_tracker_stats(outputStream* st) {
 607   st->print_cr("\nMemory Tracker Stats:");
 608   st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
 609   st->print_cr("\tthead count = %d", _thread_count);
 610   st->print_cr("\tArena instance = %d", Arena::_instance_count);
 611   st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
 612   st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
 613   st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
 614   if (_worker_thread != NULL) {
 615     st->print_cr("\tWorker thread:");
 616     st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
 617     st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
 618     st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
 619   } else {
 620     st->print_cr("\tWorker thread is not started");
 621   }
 622   st->print_cr(" ");
 623 
 624   if (_snapshot != NULL) {
 625     _snapshot->print_snapshot_stats(st);
 626   } else {
 627     st->print_cr("No snapshot");
 628   }
 629 }
 630 #endif
 631