src/share/vm/services/memTracker.cpp

rev 3953 : JDK-8005012: Add WB APIs to better support NMT testing
Summary: Add MemTracker::wbtest_wait_for_data_merge() so WhiteBox tests can block until the current generation of NMT data has been merged

*** 27,36 ****
--- 27,37 ----
  #include "runtime/atomic.hpp"
  #include "runtime/interfaceSupport.hpp"
  #include "runtime/mutexLocker.hpp"
  #include "runtime/safepoint.hpp"
  #include "runtime/threadCritical.hpp"
+ #include "runtime/vm_operations.hpp"
  #include "services/memPtr.hpp"
  #include "services/memReporter.hpp"
  #include "services/memTracker.hpp"
  #include "utilities/decoder.hpp"
  #include "utilities/globalDefinitions.hpp"
*** 63,72 ****
--- 64,75 ----
  MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
  volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
  MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
  int MemTracker::_thread_count = 255;
  volatile jint MemTracker::_pooled_recorder_count = 0;
+ volatile unsigned long MemTracker::_processing_generation = 0;
+ volatile bool MemTracker::_worker_thread_idle = false;
  debug_only(intx MemTracker::_main_thread_tid = 0;)
  NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
  
  void MemTracker::init_tracking_options(const char* option_line) {
    _tracking_level = NMT_off;
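
Reviewer note: the two new volatiles are the worker thread's half of the handshake used by the WhiteBox wait added further down. _processing_generation publishes the last generation the worker has finished merging, and _worker_thread_idle tells a waiter that no merge is in flight. Below is a standalone sketch of that protocol using plain std::atomic stand-ins for the HotSpot volatiles; all names and helpers here are illustrative, not HotSpot API.

  #include <atomic>

  // Illustrative stand-ins for MemTracker::_processing_generation and
  // MemTracker::_worker_thread_idle.
  static std::atomic<unsigned long> processing_generation{0};
  static std::atomic<bool>          worker_thread_idle{false};

  // Worker side: after folding all recorders stamped with generation 'gen'
  // into the snapshot, publish it so waiters can tell their data is merged.
  void publish_merged_generation(unsigned long gen) {
    processing_generation.store(gen);
  }

  // Worker side: flag idleness so a waiter knows that, if the generation
  // is not advancing, it must force a safepoint to create new work.
  void set_worker_idle(bool idle) {
    worker_thread_idle.store(idle);
  }

  // Waiter side: true once the worker has merged past 'query_gen'
  // (ignoring counter wrap-around, which the real code also handles).
  bool merged_past(unsigned long query_gen) {
    return processing_generation.load() > query_gen;
  }
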
*** 277,287 ****
        (void*)cur_head)) {
        return get_new_or_pooled_instance();
      }
      cur_head->set_next(NULL);
      Atomic::dec(&_pooled_recorder_count);
!     debug_only(cur_head->set_generation();)
      return cur_head;
    }
  }
  
  /*
--- 280,290 ----
        (void*)cur_head)) {
        return get_new_or_pooled_instance();
      }
      cur_head->set_next(NULL);
      Atomic::dec(&_pooled_recorder_count);
!     cur_head->set_generation();
      return cur_head;
    }
  }
  
  /*
*** 568,577 ****
--- 571,624 ----
      return true;
    }
    return false;
  }
  
+ // Whitebox API for blocking until the current generation of NMT data has been merged
+ bool MemTracker::wbtest_wait_for_data_merge() {
+   MutexLockerEx lock(_query_lock, true);
+   assert(_worker_thread != NULL, "Invalid query");
+   // the generation at query time, so NMT will spin till this generation is processed
+   unsigned long generation_at_query_time = SequenceGenerator::current_generation();
+   unsigned long current_processing_generation = _processing_generation;
+   // check if the generation counter has overflowed
+   bool generation_overflown = (generation_at_query_time < current_processing_generation);
+   long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+   // spin
+   while (!shutdown_in_progress()) {
+     if (!generation_overflown) {
+       if (current_processing_generation > generation_at_query_time) {
+         return true;
+       }
+     } else {
+       assert(generations_to_wrap >= 0, "Sanity check");
+       long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+       assert(current_generations_to_wrap >= 0, "Sanity check");
+       // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
+       if (current_generations_to_wrap > generations_to_wrap &&
+           current_processing_generation > generation_at_query_time) {
+         return true;
+       }
+     }
+ 
+     // If the worker thread is idle but the generation is not advancing, there is
+     // no safepoint to let NMT advance the generation; force one.
+     if (_worker_thread_idle) {
+       VM_ForceSafepoint vfs;
+       VMThread::execute(&vfs);
+     }
+     MemSnapshot* snapshot = get_snapshot();
+     if (snapshot == NULL) {
+       return false;
+     }
+     snapshot->wait(1000);
+     current_processing_generation = _processing_generation;
+   }
+   // We end up here if NMT is shutting down before our data has been merged
+   return false;
+ }
+ 
  // compare memory usage between current snapshot and baseline
  bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
    MutexLockerEx lock(_query_lock, true);
    if (_baseline.baselined()) {
      MemBaseline baseline;
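
Reviewer note: the subtle part of the new wait loop is the wrap-around branch. SequenceGenerator generations are an unsigned counter, so generation_at_query_time < current_processing_generation can mean the counter overflowed rather than that the query is already satisfied. Below is a standalone sketch of that overflow-tolerant progress test, mirroring the patch's generations_to_wrap arithmetic under the assumption that MAX_UNSIGNED_LONG equals ULONG_MAX; the function name and parameters are illustrative only.

  #include <climits>

  // Illustrative re-statement (not HotSpot code) of the progress check in
  // wbtest_wait_for_data_merge(). 'query_gen' and 'start_gen' are sampled
  // once when the wait begins; 'current_gen' is re-read on every iteration.
  bool merge_has_progressed(unsigned long query_gen,
                            unsigned long start_gen,
                            unsigned long current_gen) {
    if (query_gen >= start_gen) {
      // Normal case: no overflow at query time, a plain comparison suffices.
      return current_gen > query_gen;
    }
    // query_gen < start_gen: the sequence counter wrapped past ULONG_MAX
    // while the worker was still merging pre-wrap generations. Progress is
    // certain only once the worker's generation wraps as well (its distance
    // to the next wrap grows back) and then passes the sampled generation.
    unsigned long to_wrap_at_start = ULONG_MAX - start_gen;
    unsigned long to_wrap_now      = ULONG_MAX - current_gen;
    return to_wrap_now > to_wrap_at_start && current_gen > query_gen;
  }

The loop itself alternates between this check, forcing a safepoint via VM_ForceSafepoint when the worker is idle (which is why runtime/vm_operations.hpp is now included), and sleeping on the snapshot with snapshot->wait(1000) so the waiter wakes up at most once a second. Presumably the Java side of the WhiteBox API calls into this method, though that glue code is outside this file.
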