
src/share/vm/memory/sharedHeap.cpp

rev 7854 : imported patch 8027962-per-phase-timing-measurements-for-strong-roots-processing

*** 36,62 ****
  #include "utilities/copy.hpp"
  #include "utilities/workgroup.hpp"

  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

! SharedHeap* SharedHeap::_sh;
!
! // The set of potentially parallel tasks in root scanning.
! enum SH_process_roots_tasks {
!   SH_PS_Universe_oops_do,
!   SH_PS_JNIHandles_oops_do,
!   SH_PS_ObjectSynchronizer_oops_do,
!   SH_PS_FlatProfiler_oops_do,
!   SH_PS_Management_oops_do,
!   SH_PS_SystemDictionary_oops_do,
!   SH_PS_ClassLoaderDataGraph_oops_do,
!   SH_PS_jvmti_oops_do,
!   SH_PS_CodeCache_oops_do,
!   // Leave this one last.
!   SH_PS_NumElements
  };

  SharedHeap::SharedHeap(CollectorPolicy* policy_) :
    CollectedHeap(),
    _collector_policy(policy_),
    _strong_roots_scope(NULL),
    _strong_roots_parity(0),
--- 36,66 ----
  #include "utilities/copy.hpp"
  #include "utilities/workgroup.hpp"

  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

! static const char* ext_roots_task_strings[SharedHeap::SH_PS_NumElements] = {
!   "Thread Roots (ms)",
!   "StringTable Roots (ms)",
!   "Universe Roots (ms)",
!   "JNI Handles Roots (ms)",
!   "ObjectSynchronizer Roots (ms)",
!   "FlatProfiler Roots (ms)",
!   "Management Roots (ms)",
!   "SystemDictionary Roots (ms)",
!   "CLDG Roots (ms)",
!   "JVMTI Roots (ms)",
!   "CodeCache Roots (ms)"
  };
+
+ const char* SharedHeap::ext_roots_task_str(uint task) {
+   vmassert(task < ARRAY_SIZE(ext_roots_task_strings), "must be");
+   return ext_roots_task_strings[task];
+ }
+
+ SharedHeap* SharedHeap::_sh;

  SharedHeap::SharedHeap(CollectorPolicy* policy_) :
    CollectedHeap(),
    _collector_policy(policy_),
    _strong_roots_scope(NULL),
    _strong_roots_parity(0),
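Note on reading the hunks below: the string table above is indexed by the SH_PS_* task ids, and this patch extends the task set with entries for the Threads and StringTable phases. The enum itself is no longer defined in this file; per the SharedHeap:: scoping it presumably now lives in the SharedHeap class declaration in sharedHeap.hpp, which is not part of this page. The following is only a sketch of the layout the strings appear to mirror, inferred from the array order and the task ids used in the hunks below, not the patch's actual declaration:

// Hypothetical reconstruction; the real declaration is in sharedHeap.hpp (not shown here).
// The order must match ext_roots_task_strings[] above for ext_roots_task_str() to be correct.
enum SH_process_roots_tasks {
  SH_PS_Threads_oops_do,              // "Thread Roots (ms)"
  SH_PS_StringTable_oops_do,          // "StringTable Roots (ms)"
  SH_PS_Universe_oops_do,             // "Universe Roots (ms)"
  SH_PS_JNIHandles_oops_do,           // "JNI Handles Roots (ms)"
  SH_PS_ObjectSynchronizer_oops_do,   // "ObjectSynchronizer Roots (ms)"
  SH_PS_FlatProfiler_oops_do,         // "FlatProfiler Roots (ms)"
  SH_PS_Management_oops_do,           // "Management Roots (ms)"
  SH_PS_SystemDictionary_oops_do,     // "SystemDictionary Roots (ms)"
  SH_PS_ClassLoaderDataGraph_oops_do, // "CLDG Roots (ms)"
  SH_PS_jvmti_oops_do,                // "JVMTI Roots (ms)"
  SH_PS_CodeCache_oops_do,            // "CodeCache Roots (ms)"
  // Leave this one last.
  SH_PS_NumElements
};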
*** 183,193 ****
                                 ScanningOption so,
                                 OopClosure* strong_roots,
                                 OopClosure* weak_roots,
                                 CLDClosure* strong_cld_closure,
                                 CLDClosure* weak_cld_closure,
!                                CodeBlobClosure* code_roots) {
    StrongRootsScope srs(this, activate_scope);

    // General roots.
    assert(_strong_roots_parity != 0, "must have called prologue code");
    assert(code_roots != NULL, "code root closure should always be set");
--- 187,198 ----
                                 ScanningOption so,
                                 OopClosure* strong_roots,
                                 OopClosure* weak_roots,
                                 CLDClosure* strong_cld_closure,
                                 CLDClosure* weak_cld_closure,
!                                CodeBlobClosure* code_roots,
!                                GCPhaseTimeTracker* phase_durations) {
    StrongRootsScope srs(this, activate_scope);

    // General roots.
    assert(_strong_roots_parity != 0, "must have called prologue code");
    assert(code_roots != NULL, "code root closure should always be set");
*** 198,253 ****
    // Iterating over the CLDG and the Threads are done early to allow G1 to
    // first process the strong CLDs and nmethods and then, after a barrier,
    // let the thread process the weak CLDs and nmethods.

    if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
      ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
    }

    // Some CLDs contained in the thread frames should be considered strong.
    // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
    CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
    // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
    CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

    Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

    // This is the point where this worker thread will not find more strong CLDs/nmethods.
    // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
    active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

    if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
      Universe::oops_do(strong_roots);
    }

    // Global (strong) JNI handles
    if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
      JNIHandles::oops_do(strong_roots);

    if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
      ObjectSynchronizer::oops_do(strong_roots);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
      FlatProfiler::oops_do(strong_roots);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
      Management::oops_do(strong_roots);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
      JvmtiExport::oops_do(strong_roots);
!
    if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
      SystemDictionary::roots_oops_do(strong_roots, weak_roots);
    }

    // All threads execute the following. A specific chunk of buckets
    // from the StringTable are the individual tasks.
    if (weak_roots != NULL) {
      if (CollectedHeap::use_parallel_gc_threads()) {
        StringTable::possibly_parallel_oops_do(weak_roots);
      } else {
        StringTable::oops_do(weak_roots);
      }
    }

    if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
      if (so & SO_ScavengeCodeCache) {
        assert(code_roots != NULL, "must supply closure for code cache");

        // We only visit parts of the CodeCache when scavenging.
--- 203,289 ----
    // Iterating over the CLDG and the Threads are done early to allow G1 to
    // first process the strong CLDs and nmethods and then, after a barrier,
    // let the thread process the weak CLDs and nmethods.

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_ClassLoaderDataGraph_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
      ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
    }
+   }

    // Some CLDs contained in the thread frames should be considered strong.
    // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
    CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
    // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
    CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_Threads_oops_do);
    Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+   }

    // This is the point where this worker thread will not find more strong CLDs/nmethods.
    // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
    active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_Universe_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
      Universe::oops_do(strong_roots);
    }
+   }

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_JNIHandles_oops_do);
    // Global (strong) JNI handles
    if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
      JNIHandles::oops_do(strong_roots);
+   }

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_ObjectSynchronizer_oops_do);
    if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
      ObjectSynchronizer::oops_do(strong_roots);
+   }
+   {
+     TrackPhaseTime x(phase_durations, SH_PS_FlatProfiler_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
      FlatProfiler::oops_do(strong_roots);
+   }
+   {
+     TrackPhaseTime x(phase_durations, SH_PS_Management_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
      Management::oops_do(strong_roots);
+   }
+   {
+     TrackPhaseTime x(phase_durations, SH_PS_jvmti_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
      JvmtiExport::oops_do(strong_roots);
!   }
!   {
!     TrackPhaseTime x(phase_durations, SH_PS_SystemDictionary_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
      SystemDictionary::roots_oops_do(strong_roots, weak_roots);
    }
+   }

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_StringTable_oops_do);
    // All threads execute the following. A specific chunk of buckets
    // from the StringTable are the individual tasks.
    if (weak_roots != NULL) {
      if (CollectedHeap::use_parallel_gc_threads()) {
        StringTable::possibly_parallel_oops_do(weak_roots);
      } else {
        StringTable::oops_do(weak_roots);
      }
    }
+   }

+   {
+     TrackPhaseTime x(phase_durations, SH_PS_CodeCache_oops_do);
    if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
      if (so & SO_ScavengeCodeCache) {
        assert(code_roots != NULL, "must supply closure for code cache");

        // We only visit parts of the CodeCache when scavenging.
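TrackPhaseTime and GCPhaseTimeTracker are introduced elsewhere in this patch; their definitions are not part of this file. The hunk above relies only on the scoped-timer idiom: a stack object presumably records the elapsed time of its enclosing block into the tracker slot named by the task id, and presumably does nothing when the tracker is NULL (callers that do not request per-phase timing). A minimal standalone sketch of that idiom follows; the names and the std::chrono plumbing are assumptions for illustration, not the patch's actual code:

#include <chrono>
#include <cstdio>

// Stand-in for GCPhaseTimeTracker: one accumulated duration per root-processing task.
struct PhaseTimes {
  double phase_ms[16];                  // indexed by a task id, e.g. SH_PS_Threads_oops_do
};

// Stand-in for TrackPhaseTime: measures the lifetime of the enclosing scope.
class ScopedPhaseTimer {
  PhaseTimes* _times;                   // may be NULL: timing is then skipped entirely
  unsigned    _task;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedPhaseTimer(PhaseTimes* times, unsigned task)
    : _times(times), _task(task), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    if (_times != NULL) {
      std::chrono::duration<double, std::milli> d = std::chrono::steady_clock::now() - _start;
      _times->phase_ms[_task] += d.count();   // accumulate into the task's slot
    }
  }
};

int main() {
  PhaseTimes times = {};
  {
    ScopedPhaseTimer t(&times, 0);      // the brace scope bounds the measured phase
    // ... do the root-processing work for this phase ...
  }
  std::printf("phase 0: %.3f ms\n", times.phase_ms[0]);
  return 0;
}

The appeal of the pattern in the diff above is that each existing claim-and-process block is wrapped in a brace scope and timed by the destructor, so the pre-existing code inside the scope does not change and shows up as unmodified context lines.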
*** 263,296 ****
      // Verify that the code cache contents are not subject to
      // movement by a scavenging collection.
      DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure,
                                                                     !CodeBlobToOopClosure::FixRelocations));
      DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
    }

    _process_strong_tasks->all_tasks_completed();
  }

  void SharedHeap::process_all_roots(bool activate_scope,
                                     ScanningOption so,
                                     OopClosure* roots,
                                     CLDClosure* cld_closure,
!                                    CodeBlobClosure* code_closure) {
    process_roots(activate_scope, so,
                  roots, roots,
                  cld_closure, cld_closure,
!                 code_closure);
  }

  void SharedHeap::process_strong_roots(bool activate_scope,
                                        ScanningOption so,
                                        OopClosure* roots,
                                        CLDClosure* cld_closure,
!                                       CodeBlobClosure* code_closure) {
    process_roots(activate_scope, so,
                  roots, NULL,
                  cld_closure, NULL,
!                 code_closure);
  }

  class AlwaysTrueClosure: public BoolObjectClosure {
  public:
--- 299,337 ----
      // Verify that the code cache contents are not subject to
      // movement by a scavenging collection.
      DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure,
                                                                     !CodeBlobToOopClosure::FixRelocations));
      DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
    }
+   }

    _process_strong_tasks->all_tasks_completed();
  }

  void SharedHeap::process_all_roots(bool activate_scope,
                                     ScanningOption so,
                                     OopClosure* roots,
                                     CLDClosure* cld_closure,
!                                    CodeBlobClosure* code_closure,
!                                    GCPhaseTimeTracker* phase_durations) {
    process_roots(activate_scope, so,
                  roots, roots,
                  cld_closure, cld_closure,
!                 code_closure,
!                 phase_durations);
  }

  void SharedHeap::process_strong_roots(bool activate_scope,
                                        ScanningOption so,
                                        OopClosure* roots,
                                        CLDClosure* cld_closure,
!                                       CodeBlobClosure* code_closure,
!                                       GCPhaseTimeTracker* phase_durations) {
    process_roots(activate_scope, so,
                  roots, NULL,
                  cld_closure, NULL,
!                 code_closure,
!                 phase_durations);
  }

  class AlwaysTrueClosure: public BoolObjectClosure {
  public: