1 /*
   2  * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "runtime/atomic.hpp"
  27 #include "runtime/interfaceSupport.hpp"
  28 #include "runtime/mutexLocker.hpp"
  29 #include "runtime/safepoint.hpp"
  30 #include "runtime/threadCritical.hpp"
  31 #include "runtime/vm_operations.hpp"
  32 #include "services/memPtr.hpp"
  33 #include "services/memReporter.hpp"
  34 #include "services/memTracker.hpp"
  35 #include "utilities/decoder.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 
// Cached flag: true only when NMT runs at detail level and stack walking is
// available; set during bootstrap/start (see MemTracker::bootstrap_* / start()).
bool NMT_track_callsite = false;
  39 
  40 // walk all 'known' threads at NMT sync point, and collect their recorders
  41 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  42   assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  43   if (thread->is_Java_thread()) {
  44     JavaThread* javaThread = (JavaThread*)thread;
  45     MemRecorder* recorder = javaThread->get_recorder();
  46     if (recorder != NULL) {
  47       MemTracker::enqueue_pending_recorder(recorder);
  48       javaThread->set_recorder(NULL);
  49     }
  50   }
  51   _thread_count ++;
  52 }
  53 
  54 
// ---- MemTracker static state ----
// The two 'volatile MemRecorder*' fields below are lock-free list heads,
// updated only via Atomic::cmpxchg_ptr (see enqueue_pending_recorder(),
// get_new_or_pooled_instance() and release_thread_recorder()).
MemRecorder*                    MemTracker::_global_recorder = NULL;
MemSnapshot*                    MemTracker::_snapshot = NULL;
MemBaseline                     MemTracker::_baseline;
// protects _baseline and _snapshot against concurrent queries
Mutex*                          MemTracker::_query_lock = NULL;
// recorders waiting to be merged by the worker thread
volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
// recycled recorders available for reuse
volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
MemTrackWorker*                 MemTracker::_worker_thread = NULL;
int                             MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
// initial estimate; replaced by the real thread count at each sync point
// (see sync()).  Also bounds the recorder pool (_thread_count * 2).
int                             MemTracker::_thread_count = 255;
volatile jint                   MemTracker::_pooled_recorder_count = 0;
volatile unsigned long          MemTracker::_processing_generation = 0;
volatile bool                   MemTracker::_worker_thread_idle = false;
debug_only(intx                 MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
  72 
  73 void MemTracker::init_tracking_options(const char* option_line) {
  74   _tracking_level = NMT_off;
  75   if (strcmp(option_line, "=summary") == 0) {
  76     _tracking_level = NMT_summary;
  77   } else if (strcmp(option_line, "=detail") == 0) {
  78     _tracking_level = NMT_detail;
  79   } else if (strcmp(option_line, "=off") != 0) {
  80     vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  81   }
  82 }
  83 
  84 // first phase of bootstrapping, when VM is still in single-threaded mode.
  85 void MemTracker::bootstrap_single_thread() {
  86   if (_tracking_level > NMT_off) {
  87     assert(_state == NMT_uninited, "wrong state");
  88 
  89     // NMT is not supported with UseMallocOnly is on. NMT can NOT
  90     // handle the amount of malloc data without significantly impacting
  91     // runtime performance when this flag is on.
  92     if (UseMallocOnly) {
  93       shutdown(NMT_use_malloc_only);
  94       return;
  95     }
  96 
  97     _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
  98     if (_query_lock == NULL) {
  99       shutdown(NMT_out_of_memory);
 100       return;
 101     }
 102 
 103     debug_only(_main_thread_tid = os::current_thread_id();)
 104     _state = NMT_bootstrapping_single_thread;
 105     NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
 106   }
 107 }
 108 
// second phase of bootstrapping, when the VM is about to enter or has already entered multi-threaded mode.
 110 void MemTracker::bootstrap_multi_thread() {
 111   if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
 112   // create nmt lock for multi-thread execution
 113     assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
 114     _state = NMT_bootstrapping_multi_thread;
 115     NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
 116   }
 117 }
 118 
 119 // fully start nmt
 120 void MemTracker::start() {
 121   // Native memory tracking is off from command line option
 122   if (_tracking_level == NMT_off || shutdown_in_progress()) return;
 123 
 124   assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
 125   assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
 126 
 127   _snapshot = new (std::nothrow)MemSnapshot();
 128   if (_snapshot != NULL && !_snapshot->out_of_memory()) {
 129     if (start_worker()) {
 130       _state = NMT_started;
 131       NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
 132       return;
 133     }
 134   }
 135 
 136   // fail to start native memory tracking, shut it down
 137   shutdown(NMT_initialization);
 138 }
 139 
 140 /**
 141  * Shutting down native memory tracking.
 142  * We can not shutdown native memory tracking immediately, so we just
 143  * setup shutdown pending flag, every native memory tracking component
 144  * should orderly shut itself down.
 145  *
 146  * The shutdown sequences:
 147  *  1. MemTracker::shutdown() sets MemTracker to shutdown pending state
 *  2. Worker thread calls MemTracker::final_shutdown(), which transitions
 149  *     MemTracker to final shutdown state.
 150  *  3. At sync point, MemTracker does final cleanup, before sets memory
 151  *     tracking level to off to complete shutdown.
 152  */
 153 void MemTracker::shutdown(ShutdownReason reason) {
 154   if (_tracking_level == NMT_off) return;
 155 
 156   if (_state <= NMT_bootstrapping_single_thread) {
 157     // we still in single thread mode, there is not contention
 158     _state = NMT_shutdown_pending;
 159     _reason = reason;
 160   } else {
 161     // we want to know who initialized shutdown
 162     if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
 163                                        (jint*)&_state, (jint)NMT_started)) {
 164         _reason = reason;
 165     }
 166   }
 167 }
 168 
 169 // final phase of shutdown
// final phase of shutdown
// Releases all recorders, the baseline/snapshot pair and the shared
// decoder, then moves the tracker to NMT_final_shutdown; sync() finishes
// the job at a later safepoint.
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    // detach the snapshot before deleting so no query under _query_lock
    // can observe a half-deleted object
    MemSnapshot* the_snapshot = _snapshot;
    _snapshot = NULL;
    delete the_snapshot;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete worker inside the thread critical
    // NOTE: the worker is detached only when this method runs on the
    // worker thread itself (Thread::current() == _worker_thread)
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}
 204 
 205 // delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  volatile MemRecorder* cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    // CAS-detach the entire pool list by swapping the head with NULL;
    // retry whenever a concurrent push/pop changed the head under us
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      // deleting the head is expected to release the whole chain via
      // MemRecorder's destructor — confirm against memRecorder.hpp
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}
 221 
 222 // delete all recorders in pending queue
 223 void MemTracker::delete_all_pending_recorders() {
 224   // free all pending recorders
 225   MemRecorder* pending_head = get_pending_recorders();
 226   if (pending_head != NULL) {
 227     delete pending_head;
 228   }
 229 }
 230 
 231 /*
 232  * retrieve per-thread recorder of specified thread.
 233  * if thread == NULL, it means global recorder
 234  */
 235 MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
 236   if (shutdown_in_progress()) return NULL;
 237 
 238   MemRecorder* rc;
 239   if (thread == NULL) {
 240     rc = _global_recorder;
 241   } else {
 242     rc = thread->get_recorder();
 243   }
 244 
 245   if (rc != NULL && rc->is_full()) {
 246     enqueue_pending_recorder(rc);
 247     rc = NULL;
 248   }
 249 
 250   if (rc == NULL) {
 251     rc = get_new_or_pooled_instance();
 252     if (thread == NULL) {
 253       _global_recorder = rc;
 254     } else {
 255       thread->set_recorder(rc);
 256     }
 257   }
 258   return rc;
 259 }
 260 
 261 /*
 262  * get a per-thread recorder from pool, or create a new one if
 263  * there is not one available.
 264  */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
   if (cur_head == NULL) {
     // pool is empty: allocate a fresh recorder
     MemRecorder* rec = new (std::nothrow)MemRecorder();
     if (rec == NULL || rec->out_of_memory()) {
       // allocation (or the recorder's internal buffer) failed:
       // shut NMT down and return NULL to the caller
       shutdown(NMT_out_of_memory);
       if (rec != NULL) {
         delete rec;
         rec = NULL;
       }
     }
     return rec;
   } else {
     // pop the pool head with CAS; on a lost race retry via recursion
     MemRecorder* next_head = cur_head->next();
     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
       (void*)cur_head)) {
       return get_new_or_pooled_instance();
     }
     cur_head->set_next(NULL);
     Atomic::dec(&_pooled_recorder_count);
     // stamp the recorder with the current generation before handing it out
     cur_head->set_generation();
     return cur_head;
  }
}
 289 
 290 /*
 291  * retrieve all recorders in pending queue, and empty the queue
 292  */
MemRecorder* MemTracker::get_pending_recorders() {
  // atomically swap the queue head with NULL; on success the caller owns
  // the entire chain of pending recorders
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    // lost a race with a concurrent enqueue; reload the head and retry
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}
 303 
 304 /*
 305  * release a recorder to recorder pool.
 306  */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  // cap the pool at roughly two recorders per known thread; beyond that,
  // or during shutdown, just free the recorder
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  // lock-free push onto the pool: re-read the head and re-link the node
  // on every failed CAS
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  // the count is a separate atomic, so it is only approximately in sync
  // with the list itself
  Atomic::inc(&_pooled_recorder_count);
}
 326 
 327 /*
 328  * This is the most important method in whole nmt implementation.
 329  *
 330  * Create a memory record.
 * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-thread mode.
 * 2. For all threads other than JavaThreads, ThreadCritical is needed
 *    to write their records to the global recorder.
 * 3. JavaThreads that are no longer visible to the safepoint also need
 *    to take ThreadCritical, and their records are written to the global
 *    recorder, since these threads are NOT walked by Threads.do_thread().
 * 4. JavaThreads that are running in native state have to transition
 *    to VM state before writing to per-thread recorders.
 * 5. JavaThreads that are running in VM state do not need any lock and
 *    records are written to per-thread recorders.
 * 6. Threads that have yet to attach a VM 'Thread' need to take
 *    ThreadCritical to write to the global recorder.
 344  *
 345  *    Important note:
 346  *    NO LOCK should be taken inside ThreadCritical lock !!!
 347  */
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  assert(addr != NULL, "Sanity check");
  if (!shutdown_in_progress()) {
    // single-thread bootstrap: write records directly to the global
    // recorder, without taking any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
          // don't use Thread::current(), since it is possible that
          // the calling thread has yet to attach to VM 'Thread',
          // which would result in an assertion failure
          thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        JavaThread*      java_thread = (JavaThread*)thread;
        JavaThreadState  state = java_thread->thread_state();
        if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
          // JavaThreads that are safepoint safe, can run through safepoint,
          // so ThreadCritical is needed to ensure no threads at safepoint create
          // new records while the records are being gathered and the sequence number is changing
          ThreadCritical tc;
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        } else {
          // not safepoint-safe: such a thread is stopped at safepoints, so
          // its per-thread recorder can be written without a lock (see
          // items 4/5 in the header comment above)
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        }
      } else {
        // other threads, such as worker and watcher threads, etc. need to
        // take ThreadCritical to write to global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // for a thread that has yet to attach a VM 'Thread', we can not use
        // a VM mutex; use native thread critical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}
 398 
 399 // write a record to proper recorder. No lock can be taken from this method
 400 // down.
 401 void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
 402     size_t size, address pc, JavaThread* thread) {
 403 
 404     MemRecorder* rc = get_thread_recorder(thread);
 405     if (rc != NULL) {
 406       rc->record(addr, flags, size, pc);
 407     }
 408 }
 409 
 410 /**
 411  * enqueue a recorder to pending queue
 412  */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  // lock-free push onto the merge-pending queue: re-read the head and
  // re-link the node on every failed CAS
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
}
 432 
 433 /*
 434  * The method is called at global safepoint
 * during its synchronization process.
 436  *   1. enqueue all JavaThreads' per-thread recorders
 437  *   2. enqueue global recorder
 438  *   3. retrieve all pending recorders
 439  *   4. reset global sequence number generator
 440  *   5. call worker's sync
 441  */
// Tuning knobs for the skip heuristic in sync() below:
#define MAX_SAFEPOINTS_TO_SKIP     128  // max consecutive sync points to skip
#define SAFE_SEQUENCE_THRESHOLD    30   // % of sequence-number space still "safe"
#define HIGH_GENERATION_THRESHOLD  60   // % of generation buffers considered nearly full

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit large number of safepoints in short period of time
  // without meaningful activities. We should prevent going to
  // sync point in these cases, which can potentially exhaust generation buffer.
  // Here are the factors to determine if we should go into sync point:
  // 1. not to overflow sequence number
  // 2. if we are in danger to overflow generation buffer
  // 3. how many safepoints we already skipped sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      // skip only while sequence numbers are still plentiful but the
      // generation buffer is nearly exhausted: entering the sync point
      // would consume yet another generation
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    _sync_point_skip_count = 0;
    {
      // This method is running at safepoint, with ThreadCritical lock,
      // it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;

      SequenceGenerator::reset();

      // walk all JavaThreads to collect recorders
      SyncThreadRecorderClosure stc;
      Threads::threads_do(&stc);

      // refresh the thread count used to size the recorder pool
      _thread_count = stc.get_thread_count();
      MemRecorder* pending_recorders = get_pending_recorders();

      // chain the global recorder onto the front of the pending list
      if (_global_recorder != NULL) {
        _global_recorder->set_next(pending_recorders);
        pending_recorders = _global_recorder;
        _global_recorder = NULL;
      }
      // check _worker_thread with lock to avoid racing condition
      if (_worker_thread != NULL) {
        _worker_thread->at_sync_point(pending_recorders);
      }

      assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
    }
  }

  // now, it is the time to shut whole things off
  if (_state == NMT_final_shutdown) {
    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }
    MemRecorder* pending_recorders = get_pending_recorders();
    if (pending_recorders != NULL) {
      delete pending_recorders;
    }
    // try at a later sync point to ensure MemRecorder instance drops to zero to
    // completely shutdown NMT
    if (MemRecorder::_instance_count == 0) {
      _state = NMT_shutdown;
      _tracking_level = NMT_off;
    }
  }
}
 524 
 525 /*
 526  * Start worker thread.
 527  */
 528 bool MemTracker::start_worker() {
 529   assert(_worker_thread == NULL, "Just Check");
 530   _worker_thread = new (std::nothrow) MemTrackWorker();
 531   if (_worker_thread == NULL || _worker_thread->has_error()) {
 532     shutdown(NMT_initialization);
 533     return false;
 534   }
 535   _worker_thread->start();
 536   return true;
 537 }
 538 
 539 /*
 540  * We need to collect a JavaThread's per-thread recorder
 541  * before it exits.
 542  */
 543 void MemTracker::thread_exiting(JavaThread* thread) {
 544   if (is_on()) {
 545     MemRecorder* rec = thread->get_recorder();
 546     if (rec != NULL) {
 547       enqueue_pending_recorder(rec);
 548       thread->set_recorder(NULL);
 549     }
 550   }
 551 }
 552 
 553 // baseline current memory snapshot
 554 bool MemTracker::baseline() {
 555   MutexLockerEx lock(_query_lock, true);
 556   MemSnapshot* snapshot = get_snapshot();
 557   if (snapshot != NULL) {
 558     return _baseline.baseline(*snapshot, false);
 559   }
 560   return false;
 561 }
 562 
 563 // print memory usage from current snapshot
 564 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 565   MemBaseline  baseline;
 566   MutexLockerEx lock(_query_lock, true);
 567   MemSnapshot* snapshot = get_snapshot();
 568   if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 569     BaselineReporter reporter(out, unit);
 570     reporter.report_baseline(baseline, summary_only);
 571     return true;
 572   }
 573   return false;
 574 }
 575 
 576 // Whitebox API for blocking until the current generation of NMT data has been merged
// Blocks the caller until the NMT generation that was current at query
// time has been processed by the worker thread, forcing safepoints when
// the worker is idle.  Returns false if NMT shuts down while waiting.
bool MemTracker::wbtest_wait_for_data_merge() {
  MutexLockerEx lock(_query_lock, true);
  assert(_worker_thread != NULL, "Invalid query");
  // the generation at query time, so NMT will spin till this generation is processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // if generation counter overflown
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  // spin
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      // normal case: done once processing has passed the query generation
      if (current_processing_generation > generation_at_query_time) {
        break;
      }
    } else {
      // the generation counter wrapped between processing and query time:
      // wait until the processing generation itself wraps (its
      // distance-to-wrap grows) and then passes the query generation
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // to overflow an unsigned long should take long time, so to_wrap check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        break;
      }
    }

    // if worker thread is idle, but generation is not advancing, that means
    // there is no safepoint to let NMT advance generation, force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      // NMT was torn down while we were waiting
      return false;
    }
    // wait (up to 1s) for the worker to make progress, then re-sample
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  return true;
}
 618 
 619 // compare memory usage between current snapshot and baseline
 620 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
 621   MutexLockerEx lock(_query_lock, true);
 622   if (_baseline.baselined()) {
 623     MemBaseline baseline;
 624     MemSnapshot* snapshot = get_snapshot();
 625     if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
 626       BaselineReporter reporter(out, unit);
 627       reporter.diff_baselines(baseline, _baseline, summary_only);
 628       return true;
 629     }
 630   }
 631   return false;
 632 }
 633 
 634 #ifndef PRODUCT
 635 void MemTracker::walk_stack(int toSkip, char* buf, int len) {
 636   int cur_len = 0;
 637   char tmp[1024];
 638   address pc;
 639 
 640   while (cur_len < len) {
 641     pc = os::get_caller_pc(toSkip + 1);
 642     if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
 643       jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
 644       cur_len = (int)strlen(buf);
 645     } else {
 646       buf[cur_len] = '\0';
 647       break;
 648     }
 649     toSkip ++;
 650   }
 651 }
 652 
 653 void MemTracker::print_tracker_stats(outputStream* st) {
 654   st->print_cr("\nMemory Tracker Stats:");
 655   st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
 656   st->print_cr("\tthead count = %d", _thread_count);
 657   st->print_cr("\tArena instance = %d", Arena::_instance_count);
 658   st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
 659   st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
 660   st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
 661   if (_worker_thread != NULL) {
 662     st->print_cr("\tWorker thread:");
 663     st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
 664     st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
 665     st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
 666   } else {
 667     st->print_cr("\tWorker thread is not started");
 668   }
 669   st->print_cr(" ");
 670 
 671   if (_snapshot != NULL) {
 672     _snapshot->print_snapshot_stats(st);
 673   } else {
 674     st->print_cr("No snapshot");
 675   }
 676 }
 677 #endif
 678