/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "oops/instanceKlass.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memPtr.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"

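// Whether call sites (native call stacks) should be recorded for each memory
// operation; only true at detail tracking level on platforms that can walk the stack.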
bool NMT_track_callsite = false;

// walk all 'known' threads at NMT sync point, and collect their recorders
void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  if (thread->is_Java_thread()) {
    JavaThread* javaThread = (JavaThread*)thread;
    MemRecorder* recorder = javaThread->get_recorder();
    if (recorder != NULL) {
      MemTracker::enqueue_pending_recorder(recorder);
      javaThread->set_recorder(NULL);
    }
  }
  _thread_count ++;
}


MemRecorder* volatile           MemTracker::_global_recorder = NULL;
MemSnapshot*                    MemTracker::_snapshot = NULL;
MemBaseline                     MemTracker::_baseline;
Mutex*                          MemTracker::_query_lock = NULL;
MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
MemTrackWorker*                 MemTracker::_worker_thread = NULL;
int                             MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
int                             MemTracker::_thread_count = 255;
volatile jint                   MemTracker::_pooled_recorder_count = 0;
volatile unsigned long          MemTracker::_processing_generation = 0;
volatile bool                   MemTracker::_worker_thread_idle = false;
volatile bool                   MemTracker::_slowdown_calling_thread = false;
debug_only(intx                 MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)

void MemTracker::init_tracking_options(const char* option_line) {
  _tracking_level = NMT_off;
  if (strcmp(option_line, "=summary") == 0) {
    _tracking_level = NMT_summary;
  } else if (strcmp(option_line, "=detail") == 0) {
    // detail relies on a stack-walking ability that may not
    // be available depending on platform and/or compiler flags
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
      _tracking_level = NMT_detail;
#else
      jio_fprintf(defaultStream::error_stream(),
        "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
      _tracking_level = NMT_summary;
#endif
  } else if (strcmp(option_line, "=off") != 0) {
    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  }
}

// first phase of bootstrapping, when VM is still in single-threaded mode.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data without significantly impacting
    // runtime performance when this flag is on.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    if (_query_lock == NULL) {
      shutdown(NMT_out_of_memory);
      return;
    }

    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// second phase of bootstrapping, when the VM is about to enter, or has already entered, multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // create nmt lock for multi-thread execution
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// fully start nmt
void MemTracker::start() {
  // Native memory tracking is off by command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL) {
    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }

    delete _snapshot;
    _snapshot = NULL;
  }

  // failed to start native memory tracking, so shut it down
  shutdown(NMT_initialization);
}

/**
 * Shut down native memory tracking.
 * We cannot shut down native memory tracking immediately, so we just
 * set the shutdown pending flag; every native memory tracking component
 * should then shut itself down in an orderly fashion.
 *
 * The shutdown sequence:
 *  1. MemTracker::shutdown() sets MemTracker to the shutdown pending state.
 *  2. The worker thread calls MemTracker::final_shutdown(), which transitions
 *     MemTracker to the final shutdown state.
 *  3. At a sync point, MemTracker does the final cleanup, before setting the
 *     memory tracking level to off to complete the shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single-thread mode, so there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // we want to know who initiated the shutdown
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
                                       (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}

// final phase of shutdown
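// Called by the worker thread: releases all pending and pooled recorders,
// clears the baseline data, and deletes the snapshot and the shared decoder.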
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // cannot delete the worker inside ThreadCritical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}

// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  MemRecorder* volatile cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}

// delete all recorders in pending queue
void MemTracker::delete_all_pending_recorders() {
  // free all pending recorders
  MemRecorder* pending_head = get_pending_recorders();
  if (pending_head != NULL) {
    delete pending_head;
  }
}

/*
 * retrieve per-thread recorder of specified thread.
 * if thread == NULL, it means global recorder
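 * A full recorder is enqueued for processing and replaced with a fresh one.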
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}

/*
 * get a per-thread recorder from the pool, or create a new one if
 * none is available.
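 * The pool head is popped with a CAS; the whole operation retries on contention.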
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  if (cur_head == NULL) {
    MemRecorder* rec = new (std::nothrow)MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
      (void*)cur_head)) {
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    cur_head->set_generation();
    return cur_head;
  }
}

/*
 * retrieve all recorders in pending queue, and empty the queue
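 * The queue head is atomically swapped with NULL, detaching the entire list.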
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}

/*
 * release a recorder to recorder pool.
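 * Recorders are pushed back with a CAS; if the pool already holds more than
 * twice the thread count, the recorder is deleted instead of pooled.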
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}

/*
 * This is the most important method in the whole NMT implementation.
 *
 * Create a memory record.
 * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-thread mode.
 * 2. For all threads other than JavaThreads, ThreadCritical is needed
 *    to write records to the global recorder.
 * 3. For JavaThreads that are no longer visible to safepoints, ThreadCritical
 *    is also needed, and records are written to the global recorder, since
 *    these threads are NOT walked by Threads::threads_do().
 * 4. JavaThreads that are running in native state have to transition
 *    to VM state before writing to per-thread recorders.
 * 5. JavaThreads that are running in VM state do not need any lock and
 *    records are written to per-thread recorders.
 * 6. Threads that have yet to attach a VM 'Thread' need to take
 *    ThreadCritical to write to the global recorder.
 *
 *    Important note:
 *    NO LOCK should be taken inside the ThreadCritical lock !!!
 */
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  assert(addr != NULL, "Sanity check");
  if (!shutdown_in_progress()) {
    // single thread, we just write records directly to the global recorder,
    // without taking any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
        // don't use Thread::current(), since it is possible that
        // the calling thread has yet to attach to a VM 'Thread',
        // which would result in an assertion failure
        thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
      // slow down all calling threads except the NMT worker thread, so it
      // can catch up.
      if (_slowdown_calling_thread && thread != _worker_thread) {
        os::yield_all();
      }

      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        JavaThread*      java_thread = (JavaThread*)thread;
        JavaThreadState  state = java_thread->thread_state();
        if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
          // JavaThreads that are safepoint safe can run through a safepoint,
          // so ThreadCritical is needed to ensure no thread at a safepoint creates
          // new records while the records are being gathered and the sequence number is changing
          ThreadCritical tc;
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        } else {
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        }
      } else {
        // other threads, such as worker and watcher threads, etc. need to
        // take ThreadCritical to write to the global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // for a thread that has yet to attach a VM 'Thread', we cannot use a VM mutex;
        // use native thread critical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}

// write a record to proper recorder. No lock can be taken from this method
// down.
void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
    size_t size, address pc, JavaThread* thread) {

  MemRecorder* rc = get_thread_recorder(thread);
  if (rc != NULL) {
    rc->record(addr, flags, size, pc);
  }
}

/**
 * enqueue a recorder to pending queue
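 * The recorder is pushed onto the queue with a lock-free CAS loop; if shutdown
 * is in progress, the recorder is deleted instead.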
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
}

/*
 * This method is called at a global safepoint,
 * during its synchronization process.
 *   1. enqueue all JavaThreads' per-thread recorders
 *   2. enqueue global recorder
 *   3. retrieve all pending recorders
 *   4. reset global sequence number generator
 *   5. call worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60
#define MAX_RECORDER_THREAD_RATIO  30
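// MAX_SAFEPOINTS_TO_SKIP:      upper bound on consecutive safepoints that may skip the sync point
// SAFE_SEQUENCE_THRESHOLD:     percentage of the sequence number space below which skipping is safe
// HIGH_GENERATION_THRESHOLD:   percentage of generation buffers in use above which a sync must happen
// MAX_RECORDER_THREAD_RATIO:   outstanding recorders per thread above which calling threads are slowed down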

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should prevent going to the
  // sync point in these cases, which can potentially exhaust the generation buffer.
  // Here are the factors that determine if we should go into the sync point:
  // 1. not to overflow the sequence number
  // 2. whether we are in danger of overflowing the generation buffer
  // 3. how many safepoints have already skipped the sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage the generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use < HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    _sync_point_skip_count = 0;
    {
      // This method is running at a safepoint, with the ThreadCritical lock held,
      // so it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;

      SequenceGenerator::reset();

      // walk all JavaThreads to collect recorders
      SyncThreadRecorderClosure stc;
      Threads::threads_do(&stc);

      _thread_count = stc.get_thread_count();
      MemRecorder* pending_recorders = get_pending_recorders();

      if (_global_recorder != NULL) {
        _global_recorder->set_next(pending_recorders);
        pending_recorders = _global_recorder;
        _global_recorder = NULL;
      }

      // see if NMT has too many outstanding recorder instances; it usually
      // means that the worker thread is lagging behind in processing them.
      if (!AutoShutdownNMT) {
        _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
      }

      // check _worker_thread with the lock held to avoid a race condition
      if (_worker_thread != NULL) {
        _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
      }

      assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
    }
  }

  // now it is time to shut the whole thing off
  if (_state == NMT_final_shutdown) {
    // walk all JavaThreads to collect their recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }
    MemRecorder* pending_recorders = get_pending_recorders();
    if (pending_recorders != NULL) {
      delete pending_recorders;
    }
    // only complete the shutdown once the MemRecorder instance count has dropped
    // to zero; otherwise retry at a later sync point
    if (MemRecorder::_instance_count == 0) {
      _state = NMT_shutdown;
      _tracking_level = NMT_off;
    }
  }
}

/*
 * Start worker thread.
 */
bool MemTracker::start_worker(MemSnapshot* snapshot) {
  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
  if (_worker_thread == NULL) {
    return false;
  } else if (_worker_thread->has_error()) {
    delete _worker_thread;
    _worker_thread = NULL;
    return false;
  }
  _worker_thread->start();
  return true;
}

/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}

// baseline current memory snapshot
bool MemTracker::baseline() {
  MutexLocker lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}

// print memory usage from current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline  baseline;
  MutexLocker  lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}

// Whitebox API for blocking until the current generation of NMT data has been merged
bool MemTracker::wbtest_wait_for_data_merge() {
  // NMT can't be shut down while we're holding _query_lock
  MutexLocker lock(_query_lock);
  assert(_worker_thread != NULL, "Invalid query");
  // the generation at query time, so NMT will spin until this generation is processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // whether the generation counter has overflowed
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  // spin
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      if (current_processing_generation > generation_at_query_time) {
        return true;
      }
    } else {
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        return true;
      }
    }

    // if the worker thread is idle but the generation is not advancing, that means
    // there is no safepoint to let NMT advance the generation, so force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      return false;
    }
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  // We end up here if NMT is shutting down before our data has been merged
  return false;
}

// compare memory usage between current snapshot and baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLocker lock(_query_lock);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
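// Walk the native call stack, skipping 'toSkip' frames, and append the symbolized
// frame names (one per line) to 'buf', up to 'len' bytes.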
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}

void MemTracker::print_tracker_stats(outputStream* st) {
  st->print_cr("\nMemory Tracker Stats:");
  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  st->print_cr("\tthread count = %d", _thread_count);
  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  if (_worker_thread != NULL) {
    st->print_cr("\tWorker thread:");
    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  } else {
    st->print_cr("\tWorker thread is not started");
  }
  st->print_cr(" ");

  if (_snapshot != NULL) {
    _snapshot->print_snapshot_stats(st);
  } else {
    st->print_cr("No snapshot");
  }
}
#endif