--- old/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	2012-12-17 16:19:08.092569200 +0100
+++ new/src/share/tools/whitebox/sun/hotspot/WhiteBox.java	2012-12-17 16:19:07.780568600 +0100
@@ -76,4 +76,9 @@
   public native long g1NumFreeRegions();
   public native int g1RegionSize();
   public native Object[] parseCommandLine(String commandline, DiagnosticCommand[] args);
+
+  // NMT
+  public native boolean NMTAllocTest();
+  public native boolean NMTFreeTestMemory();
+  public native boolean NMTWaitForDataMerge();
 }
--- old/src/share/vm/memory/allocation.hpp	2012-12-17 16:19:10.338973100 +0100
+++ new/src/share/vm/memory/allocation.hpp	2012-12-17 16:19:10.089372700 +0100
@@ -145,9 +145,10 @@
   mtChunk             = 0x0B00,  // chunk that holds content of arenas
   mtJavaHeap          = 0x0C00,  // Java heap
   mtClassShared       = 0x0D00,  // class data sharing
-  mt_number_of_types  = 0x000D,  // number of memory types (mtDontTrack
+  mtTest              = 0x0E00,  // Test type for verifying NMT
+  mt_number_of_types  = 0x000E,  // number of memory types (mtDontTrack
                                  // is not included as validate type)
-  mtDontTrack         = 0x0E00,  // memory we do not or cannot track
+  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
   mt_masks            = 0x7F00,  // object type mask
--- old/src/share/vm/prims/whitebox.cpp	2012-12-17 16:19:12.585377100 +0100
+++ new/src/share/vm/prims/whitebox.cpp	2012-12-17 16:19:12.320176600 +0100
@@ -43,6 +43,10 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // !SERIALGC
 
+#ifdef INCLUDE_NMT
+#include "services/memTracker.hpp"
+#endif // INCLUDE_NMT
+
 bool WhiteBox::_used = false;
 
 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
@@ -110,6 +114,60 @@
 WB_END
 #endif // !SERIALGC
 
+#ifdef INCLUDE_NMT
+// Keep track of the 3 allocations in NMTAllocTest so we can free them later
+// on and verify that they're not visible anymore
+static void* nmtMtTest1 = NULL, *nmtMtTest2 = NULL, *nmtMtTest3 = NULL;
+
+// Allocate memory using the test memory type so that we can check whether
+// NMT picks it up correctly
+WB_ENTRY(jboolean, WB_NMTAllocTest(JNIEnv* env))
+  void *mem;
+
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    return false;
+  }
+
+  // Allocate 2 * 128k + 256k + 1024k and free the 1024k one to make sure we track
+  // everything correctly. Total should be 512k held alive.
+  nmtMtTest1 = os::malloc(128 * 1024, mtTest);
+  mem = os::malloc(1024 * 1024, mtTest);
+  nmtMtTest2 = os::malloc(256 * 1024, mtTest);
+  os::free(mem, mtTest);
+  nmtMtTest3 = os::malloc(128 * 1024, mtTest);
+
+  return true;
+WB_END
+
+// Free the memory allocated by NMTAllocTest
+WB_ENTRY(jboolean, WB_NMTFreeTestMemory(JNIEnv* env))
+
+  if (nmtMtTest1 == NULL || nmtMtTest2 == NULL || nmtMtTest3 == NULL) {
+    return false;
+  }
+
+  os::free(nmtMtTest1, mtTest);
+  nmtMtTest1 = NULL;
+  os::free(nmtMtTest2, mtTest);
+  nmtMtTest2 = NULL;
+  os::free(nmtMtTest3, mtTest);
+  nmtMtTest3 = NULL;
+
+  return true;
+WB_END
+
+// Block until the current generation of NMT data has been merged; used to
+// reliably test the NMT feature
+WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
+
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    return false;
+  }
+
+  return MemTracker::wbtest_wait_for_data_merge();
+WB_END
+
+#endif // INCLUDE_NMT
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object, Symbol* signature_symbol) {
@@ -177,6 +235,11 @@
   {CC"g1NumFreeRegions",   CC"()J",  (void*)&WB_G1NumFreeRegions  },
   {CC"g1RegionSize",       CC"()I",  (void*)&WB_G1RegionSize      },
 #endif // !SERIALGC
+#ifdef INCLUDE_NMT
+  {CC"NMTAllocTest",       CC"()Z",  (void*)&WB_NMTAllocTest      },
+  {CC"NMTFreeTestMemory",  CC"()Z",  (void*)&WB_NMTFreeTestMemory },
+  {CC"NMTWaitForDataMerge",CC"()Z",  (void*)&WB_NMTWaitForDataMerge},
+#endif // INCLUDE_NMT
 };
 
 #undef CC
--- old/src/share/vm/services/memBaseline.cpp	2012-12-17 16:19:14.816181000 +0100
+++ new/src/share/vm/services/memBaseline.cpp	2012-12-17 16:19:14.535380500 +0100
@@ -41,6 +41,7 @@
   {mtNMT,        "Memory Tracking"},
   {mtChunk,      "Pooled Free Chunks"},
   {mtClassShared,"Shared spaces for classes"},
+  {mtTest,       "Test"},
   {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                              // behind
 };
--- old/src/share/vm/services/memPtr.cpp	2012-12-17 16:19:17.062584900 +0100
+++ new/src/share/vm/services/memPtr.cpp	2012-12-17 16:19:16.797384500 +0100
@@ -27,8 +27,8 @@
 #include "services/memTracker.hpp"
 
 volatile jint SequenceGenerator::_seq_number = 1;
+volatile unsigned long SequenceGenerator::_generation = 1;
 NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
-DEBUG_ONLY(volatile unsigned long SequenceGenerator::_generation = 0;)
 
 jint SequenceGenerator::next() {
   jint seq = Atomic::add(1, &_seq_number);
--- old/src/share/vm/services/memPtr.hpp	2012-12-17 16:19:19.246588800 +0100
+++ new/src/share/vm/services/memPtr.hpp	2012-12-17 16:19:18.981388300 +0100
@@ -47,16 +47,16 @@
   static void reset() {
     assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
     _seq_number = 1;
-    DEBUG_ONLY(_generation ++;)
+    _generation ++;
   };
 
-  DEBUG_ONLY(static unsigned long current_generation() { return (unsigned long)_generation; })
+  static unsigned long current_generation() { return _generation; }
   NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
 
  private:
-  static volatile jint _seq_number;
-  NOT_PRODUCT(static jint _max_seq_number; )
-  DEBUG_ONLY(static volatile unsigned long _generation; )
+  static volatile jint _seq_number;
+  static volatile unsigned long _generation;
+  NOT_PRODUCT(static jint _max_seq_number; )
 };
 
 /*
--- old/src/share/vm/services/memRecorder.cpp	2012-12-17 16:19:21.508592800 +0100
+++ new/src/share/vm/services/memRecorder.cpp	2012-12-17 16:19:21.243392300 +0100
@@ -55,7 +55,7 @@
 MemRecorder::MemRecorder() {
   assert(MemTracker::is_on(), "Native memory tracking is off");
   Atomic::inc(&_instance_count);
-  debug_only(set_generation();)
+  set_generation();
 
   if (MemTracker::track_callsite()) {
     _pointer_records = new (std::nothrow)FixedSizeMemPointerArray
+      if (rec->get_generation() != processing_generation || worker_idle) {
+        processing_generation = rec->get_generation();
+        worker_idle = false;
+        MemTracker::set_current_processing_generation(processing_generation);
+      }
+
       // merge the recorder into staging area
       if (!snapshot->merge(rec)) {
         MemTracker::shutdown(MemTracker::NMT_out_of_memory);
@@ -123,6 +131,9 @@
         MemTracker::shutdown(MemTracker::NMT_out_of_memory);
       }
     } else {
+      // worker thread is idle
+      worker_idle = true;
+      MemTracker::report_worker_idle();
       snapshot->wait(1000);
       ThreadCritical tc;
       // check if more data arrived
--- old/src/share/vm/services/memTrackWorker.hpp	2012-12-17 16:19:30.369408300 +0100
+++ new/src/share/vm/services/memTrackWorker.hpp	2012-12-17 16:19:30.104207900 +0100
@@ -66,6 +66,7 @@
   NOT_PRODUCT(int _merge_count;)
   NOT_PRODUCT(int _last_gen_in_use;)
 
+  // how many generations are queued
   inline int generations_in_use() const {
     return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
   }
--- old/src/share/vm/services/memTracker.cpp	2012-12-17 16:19:32.569012200 +0100
+++ new/src/share/vm/services/memTracker.cpp	2012-12-17 16:19:32.303811700 +0100
@@ -28,6 +28,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/vm_operations.hpp"
 #include "services/memPtr.hpp"
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
@@ -64,6 +65,8 @@
 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
 int MemTracker::_thread_count = 255;
 volatile jint MemTracker::_pooled_recorder_count = 0;
+volatile unsigned long MemTracker::_processing_generation = 0;
+volatile bool MemTracker::_worker_thread_idle = false;
 debug_only(intx MemTracker::_main_thread_tid = 0;)
 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
@@ -175,8 +178,9 @@
     MutexLockerEx locker(_query_lock, true);
     // cleanup baseline data and snapshot
     _baseline.clear();
-    delete _snapshot;
+    MemSnapshot* the_snapshot = _snapshot;
     _snapshot = NULL;
+    delete the_snapshot;
   }
 
   // shutdown shared decoder instance, since it is only
@@ -278,7 +282,7 @@
     }
     cur_head->set_next(NULL);
     Atomic::dec(&_pooled_recorder_count);
-    debug_only(cur_head->set_generation();)
+    cur_head->set_generation();
     return cur_head;
   }
 }
@@ -569,6 +573,49 @@
   return false;
 }
 
+// Whitebox API for blocking until the current generation of NMT data has been merged
+bool MemTracker::wbtest_wait_for_data_merge() {
+  MutexLockerEx lock(_query_lock, true);
+  assert(_worker_thread != NULL, "Invalid query");
+  // the generation at query time; NMT will spin until this generation has been processed
+  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
+  unsigned long current_processing_generation = _processing_generation;
+  // whether the generation counter has overflowed
+  bool generation_overflown = (generation_at_query_time < current_processing_generation);
+  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+  // spin
+  while (!shutdown_in_progress()) {
+    if (!generation_overflown) {
+      if (current_processing_generation > generation_at_query_time) {
+        break;
+      }
+    } else {
+      assert(generations_to_wrap >= 0, "Sanity check");
+      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+      assert(current_generations_to_wrap >= 0, "Sanity check");
+      // overflowing an unsigned long should take a long time, so the to_wrap check is sufficient
+      if (current_generations_to_wrap > generations_to_wrap &&
+          current_processing_generation > generation_at_query_time) {
+        break;
+      }
+    }
+
+    // if the worker thread is idle but the generation is not advancing, there
+    // is no safepoint to let NMT advance the generation, so force one.
+    if (_worker_thread_idle) {
+      VM_ForceSafepoint vfs;
+      VMThread::execute(&vfs);
+    }
+    MemSnapshot* snapshot = get_snapshot();
+    if (snapshot == NULL) {
+      return false;
+    }
+    snapshot->wait(1000);
+    current_processing_generation = _processing_generation;
+  }
+  return true;
+}
+
 // compare memory usage between current snapshot and baseline
 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   MutexLockerEx lock(_query_lock, true);
--- old/src/share/vm/services/memTracker.hpp	2012-12-17 16:19:34.893416300 +0100
+++ new/src/share/vm/services/memTracker.hpp	2012-12-17 16:19:34.597015700 +0100
@@ -91,9 +91,10 @@
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only = true) { }
 
+  static bool wbtest_wait_for_data_merge() { }
+
   static inline void sync() { }
   static inline void thread_exiting(JavaThread* thread) { }
-
 };
@@ -111,6 +112,10 @@
 
 extern bool NMT_track_callsite;
 
+#ifndef MAX_UNSIGNED_LONG
+#define MAX_UNSIGNED_LONG (unsigned long)(-1)
+#endif
+
 #ifdef ASSERT
   #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
 #else
@@ -379,6 +384,11 @@
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only = true);
 
+  // the version for whitebox testing support; it ensures that all memory
+  // activities before this method call are reflected in the snapshot
+  // database.
+  static bool wbtest_wait_for_data_merge();
+
   // sync is called within global safepoint to synchronize nmt data
   static void sync();
@@ -431,6 +441,15 @@
   static void create_record_in_recorder(address addr, MEMFLAGS type, size_t size, address pc, JavaThread* thread);
 
+  static void set_current_processing_generation(unsigned long generation) {
+    _worker_thread_idle = false;
+    _processing_generation = generation;
+  }
+
+  static void report_worker_idle() {
+    _worker_thread_idle = true;
+  }
+
  private:
   // global memory snapshot
   static MemSnapshot* _snapshot;
@@ -482,6 +501,11 @@
   static volatile enum NMTStates   _state;
   // the reason for shutting down nmt
   static enum ShutdownReason       _reason;
+  // the generation that NMT is processing
+  static volatile unsigned long    _processing_generation;
+  // although NMT is still processing the current generation, there are
+  // no more recorders to process, so set the idle state
+  static volatile bool             _worker_thread_idle;
 };
 
 #endif // !INCLUDE_NMT
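
For context, a minimal sketch of how a test could drive the new WhiteBox entry points end to end. It is not part of this changeset: the class name NMTWhiteBoxSketch, the WhiteBox.getWhiteBox() bootstrap, and the VM flags in the leading comment are illustrative assumptions rather than anything introduced by this patch.

import sun.hotspot.WhiteBox;

// Illustrative sketch only. Assumes the VM is started with something like:
//   -Xbootclasspath/a:<path to whitebox classes> -XX:+UnlockDiagnosticVMOptions
//   -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary
public class NMTWhiteBoxSketch {
  public static void main(String[] args) {
    WhiteBox wb = WhiteBox.getWhiteBox();

    // Allocate 2 * 128k + 256k of mtTest memory (plus a 1024k block that is
    // freed again inside the native method), so 512k should stay alive.
    if (!wb.NMTAllocTest()) {
      throw new RuntimeException("NMT is off or shutting down");
    }

    // Block until the allocations above have been merged into the NMT
    // snapshot; only then is the "Test" category expected to show up in a
    // VM.native_memory summary.
    if (!wb.NMTWaitForDataMerge()) {
      throw new RuntimeException("could not wait for NMT data merge");
    }

    // Free the tracked test memory and wait for the merge again; the "Test"
    // category should no longer report the 512k.
    if (!wb.NMTFreeTestMemory()) {
      throw new RuntimeException("test memory was not allocated");
    }
    if (!wb.NMTWaitForDataMerge()) {
      throw new RuntimeException("could not wait for NMT data merge");
    }
  }
}

NMTWaitForDataMerge matters in such a test because the NMT worker thread merges recorder data asynchronously; without it, the snapshot could be read before the mtTest allocations (or frees) become visible.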