src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

*** 35,46 ****
--- 35,50 ----
  #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  #include "gc_implementation/parNew/parNewGeneration.hpp"
  #include "gc_implementation/shared/collectorCounters.hpp"
+ #include "gc_implementation/shared/gcTimer.hpp"
+ #include "gc_implementation/shared/gcTrace.hpp"
+ #include "gc_implementation/shared/gcTraceTime.hpp"
  #include "gc_implementation/shared/isGCActiveMark.hpp"
  #include "gc_interface/collectedHeap.inline.hpp"
+ #include "memory/allocation.hpp"
  #include "memory/cardTableRS.hpp"
  #include "memory/collectorPolicy.hpp"
  #include "memory/gcLocker.inline.hpp"
  #include "memory/genCollectedHeap.hpp"
  #include "memory/genMarkSweep.hpp"
*** 59,68 ****
--- 63,73 ----
  #include "services/runtimeService.hpp"

  // statics
  CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  bool CMSCollector::_full_gc_requested = false;
+ GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

  //////////////////////////////////////////////////////////////////
  // In support of CMS/VM thread synchronization
  //////////////////////////////////////////////////////////////////
  // We split use of the CGC_lock into 2 "levels".
*** 589,599 ****
    _collector_policy(cp),
    _should_unload_classes(false),
    _concurrent_cycles_since_last_unload(0),
    _roots_scanning_options(0),
    _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
!   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
  {
    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
      ExplicitGCInvokesConcurrent = true;
    }
    // Now expand the span and allocate the collection support structures
--- 594,607 ----
    _collector_policy(cp),
    _should_unload_classes(false),
    _concurrent_cycles_since_last_unload(0),
    _roots_scanning_options(0),
    _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
!   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
!   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
!   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
!   _cms_start_registered(false)
  {
    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
      ExplicitGCInvokesConcurrent = true;
    }
    // Now expand the span and allocate the collection support structures
*** 1674,1695 ****
    }
    acquire_control_and_collect(full, clear_all_soft_refs);
    _full_gcs_since_conc_gc++;
  }

! void CMSCollector::request_full_gc(unsigned int full_gc_count) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    unsigned int gc_count = gch->total_full_collections();
    if (gc_count == full_gc_count) {
      MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
      _full_gc_requested = true;
      CGC_lock->notify();   // nudge CMS thread
    } else {
      assert(gc_count > full_gc_count, "Error: causal loop");
    }
  }

  // The foreground and background collectors need to coordinate in order
  // to make sure that they do not mutually interfere with CMS collections.
  // When a background collection is active,
  // the foreground collector may need to take over (preempt) and
--- 1682,1723 ----
    }
    acquire_control_and_collect(full, clear_all_soft_refs);
    _full_gcs_since_conc_gc++;
  }

! void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    unsigned int gc_count = gch->total_full_collections();
    if (gc_count == full_gc_count) {
      MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
      _full_gc_requested = true;
+     _full_gc_cause = cause;
      CGC_lock->notify();   // nudge CMS thread
    } else {
      assert(gc_count > full_gc_count, "Error: causal loop");
    }
  }

+ bool CMSCollector::is_external_interruption() {
+   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+   return GCCause::is_user_requested_gc(cause) ||
+          GCCause::is_serviceability_requested_gc(cause);
+ }
+
+ void CMSCollector::report_concurrent_mode_interruption() {
+   if (is_external_interruption()) {
+     if (PrintGCDetails) {
+       gclog_or_tty->print(" (concurrent mode interrupted)");
+     }
+   } else {
+     if (PrintGCDetails) {
+       gclog_or_tty->print(" (concurrent mode failure)");
+     }
+     _gc_tracer_cm->report_concurrent_mode_failure();
+   }
+ }
+
  // The foreground and background collectors need to coordinate in order
  // to make sure that they do not mutually interfere with CMS collections.
  // When a background collection is active,
  // the foreground collector may need to take over (preempt) and
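Reviewer note on the hunk above: report_concurrent_mode_interruption() only reports a tracing event for a genuine concurrent mode failure; an externally requested GC (user- or serviceability-initiated) is logged as an interruption and deliberately produces no failure event. Below is a minimal standalone sketch of that dispatch, assuming nothing beyond the logic shown above; Cause, Tracer, and report_interruption are hypothetical stand-ins, not HotSpot types.

    // Sketch: external requests are logged as interruptions; everything
    // else counts as a concurrent mode failure and also becomes an event.
    #include <cstdio>

    enum class Cause { user_requested, serviceability, allocation_failure };

    struct Tracer {
      void report_concurrent_mode_failure() { std::puts("event: concurrent mode failure"); }
    };

    static bool is_external_interruption(Cause c) {
      return c == Cause::user_requested || c == Cause::serviceability;
    }

    static void report_interruption(Cause c, Tracer& tracer) {
      if (is_external_interruption(c)) {
        std::puts(" (concurrent mode interrupted)");
      } else {
        std::puts(" (concurrent mode failure)");
        tracer.report_concurrent_mode_failure();  // only genuine failures become events
      }
    }

    int main() {
      Tracer t;
      report_interruption(Cause::user_requested, t);     // interrupted, no event
      report_interruption(Cause::allocation_failure, t); // failure + event
    }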
*** 1843,1860 ****
          should_compact = false;
        }
      }
    )

!   if (PrintGCDetails && first_state > Idling) {
!     GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
!     if (GCCause::is_user_requested_gc(cause) ||
!         GCCause::is_serviceability_requested_gc(cause)) {
!       gclog_or_tty->print(" (concurrent mode interrupted)");
!     } else {
!       gclog_or_tty->print(" (concurrent mode failure)");
!     }
    }

    set_did_compact(should_compact);
    if (should_compact) {
      // If the collection is being acquired from the background
--- 1871,1882 ----
          should_compact = false;
        }
      }
    )

!   if (first_state > Idling) {
!     report_concurrent_mode_interruption();
    }

    set_did_compact(should_compact);
    if (should_compact) {
      // If the collection is being acquired from the background
*** 1866,1875 ****
--- 1888,1901 ----
    // Scrub the list of those references because Mark-Sweep-Compact
    // code assumes referents are not NULL and that all discovered
    // Reference objects are active.
    ref_processor()->clean_up_discovered_references();

+   if (first_state > Idling) {
+     save_heap_summary();
+   }
+
    do_compaction_work(clear_all_soft_refs);

    // Has the GC time limit been exceeded?
    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
    size_t max_eden_size = young_gen->max_capacity() -
*** 1969,1979 ****
  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
!   TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
    if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
      gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
        "collections passed to foreground collector", _full_gcs_since_conc_gc);
    }
--- 1995,2012 ----
  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
!
!   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
!   gc_timer->register_gc_start(os::elapsed_counter());
!
!   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
!   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
!
!   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
    if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
      gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
        "collections passed to foreground collector", _full_gcs_since_conc_gc);
    }
*** 2060,2069 ****
--- 2093,2106 ----
    // Sample collection pause time and reset for collection interval.
    if (UseAdaptiveSizePolicy) {
      size_policy()->msc_collection_end(gch->gc_cause());
    }

+   gc_timer->register_gc_end(os::elapsed_counter());
+
+   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
+
    // For a mark-sweep-compact, compute_new_size() will be called
    // in the heap's do_collection() method.
  }

  // A work method used by the foreground collector to do
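Reviewer note: the two hunks above bracket the foreground mark-sweep-compact between register_gc_start()/report_gc_start() at entry and register_gc_end()/report_gc_end() at exit. A standalone sketch of that pairing follows; it simplifies report_gc_end() to take raw timestamps rather than time partitions, and STWTimer, OldGenTracer, and do_compaction_work are hypothetical stand-ins for the HotSpot types.

    // Sketch: the timer records raw timestamps, the tracer turns them
    // into start/end events around the stop-the-world work.
    #include <chrono>
    #include <cstdio>

    using Ticks = std::chrono::steady_clock::time_point;

    struct STWTimer {
      Ticks _start, _end;
      void register_gc_start(Ticks t) { _start = t; }
      void register_gc_end(Ticks t)   { _end = t; }
      Ticks gc_start() const { return _start; }
      Ticks gc_end() const   { return _end; }
    };

    struct OldGenTracer {
      void report_gc_start(const char* cause, Ticks) { std::printf("GC start: %s\n", cause); }
      void report_gc_end(Ticks start, Ticks end) {
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
        std::printf("GC end after %lld us\n", static_cast<long long>(us));
      }
    };

    static void do_compaction_work(STWTimer& timer, OldGenTracer& tracer) {
      timer.register_gc_start(std::chrono::steady_clock::now());
      tracer.report_gc_start("allocation failure", timer.gc_start());
      // ... mark-sweep-compact work would run here ...
      timer.register_gc_end(std::chrono::steady_clock::now());
      tracer.report_gc_end(timer.gc_start(), timer.gc_end());
    }

    int main() { STWTimer t; OldGenTracer tr; do_compaction_work(t, tr); }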
*** 2091,2101 ****
      // In the foreground case don't do the precleaning since
      // it is not done concurrently and there is extra work
      // required.
      _collectorState = FinalMarking;
    }
!   collect_in_foreground(clear_all_soft_refs);

    // For a mark-sweep, compute_new_size() will be called
    // in the heap's do_collection() method.
  }
--- 2128,2138 ----
      // In the foreground case don't do the precleaning since
      // it is not done concurrently and there is extra work
      // required.
      _collectorState = FinalMarking;
    }
!   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());

    // For a mark-sweep, compute_new_size() will be called
    // in the heap's do_collection() method.
  }
*** 2151,2161 ****
  // the different locking requirements of the background collector and the
  // foreground collector.  There was originally an attempt to share
  // one "collect" method between the background collector and the foreground
  // collector but the if-then-else required made it cleaner to have
  // separate methods.
! void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
      "A CMS asynchronous collection is only allowed on a CMS thread.");

    GenCollectedHeap* gch = GenCollectedHeap::heap();
    {
--- 2188,2198 ----
  // the different locking requirements of the background collector and the
  // foreground collector.  There was originally an attempt to share
  // one "collect" method between the background collector and the foreground
  // collector but the if-then-else required made it cleaner to have
  // separate methods.
! void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
      "A CMS asynchronous collection is only allowed on a CMS thread.");

    GenCollectedHeap* gch = GenCollectedHeap::heap();
    {
*** 2170,2179 ****
--- 2207,2217 ----
        assert(!_foregroundGCShouldWait, "Should be clear");
        return;
      } else {
        assert(_collectorState == Idling, "Should be idling before start.");
        _collectorState = InitialMarking;
+       register_gc_start(cause);
        // Reset the expansion cause, now that we are about to begin
        // a new cycle.
        clear_expansion_cause();

        // Clear the MetaspaceGC flag since a concurrent collection
*** 2182,2191 ****
--- 2220,2230 ----
      }
      // Decide if we want to enable class unloading as part of the
      // ensuing concurrent GC cycle.
      update_should_unload_classes();
      _full_gc_requested = false;           // acks all outstanding full gc requests
+     _full_gc_cause = GCCause::_no_gc;
      // Signal that we are about to start a collection
      gch->increment_total_full_collections();  // ... starting a collection cycle
      _collection_count_start = gch->total_full_collections();
    }
*** 2261,2271 ****
    switch (_collectorState) {
      case InitialMarking:
        {
          ReleaseForegroundGC x(this);
          stats().record_cms_begin();
-
          VM_CMS_Initial_Mark initial_mark_op(this);
          VMThread::execute(&initial_mark_op);
        }
        // The collector state may be any legal state at this point
        // since the background collector may have yielded to the
--- 2300,2309 ----
    switch (_collectorState) {
      case InitialMarking:
        {
          ReleaseForegroundGC x(this);
          stats().record_cms_begin();
          VM_CMS_Initial_Mark initial_mark_op(this);
          VMThread::execute(&initial_mark_op);
        }
        // The collector state may be any legal state at this point
        // since the background collector may have yielded to the
*** 2341,2350 ****
--- 2379,2389 ----
        ReleaseForegroundGC x(this);   // unblock FG collection
        MutexLockerEx  y(Heap_lock, Mutex::_no_safepoint_check_flag);
        CMSTokenSync   z(true);   // not strictly needed.
        if (_collectorState == Resizing) {
          compute_new_size();
+         save_heap_summary();
          _collectorState = Resetting;
        } else {
          assert(_collectorState == Idling, "The state should only change"
            " because the foreground collector has finished the collection");
        }
*** 2399,2418 ****
    if (PrintGC && Verbose) {
      _cmsGen->print_heap_change(prev_used);
    }
  }

! void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
    assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
      "Foreground collector should be waiting, not executing");
    assert(Thread::current()->is_VM_thread(), "A foreground collection"
      "may only be done by the VM Thread with the world stopped");
    assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
      "VM thread should have CMS token");

!   NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
!     true, gclog_or_tty);)
    if (UseAdaptiveSizePolicy) {
      size_policy()->ms_collection_begin();
    }
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
--- 2438,2489 ----
    if (PrintGC && Verbose) {
      _cmsGen->print_heap_change(prev_used);
    }
  }

! void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
!   if (!_cms_start_registered) {
!     register_gc_start(cause);
!   }
! }
!
! void CMSCollector::register_gc_start(GCCause::Cause cause) {
!   _cms_start_registered = true;
!   _gc_timer_cm->register_gc_start(os::elapsed_counter());
!   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
! }
!
! void CMSCollector::register_gc_end() {
!   if (_cms_start_registered) {
!     report_heap_summary(GCWhen::AfterGC);
!
!     _gc_timer_cm->register_gc_end(os::elapsed_counter());
!     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
!     _cms_start_registered = false;
!   }
! }
!
! void CMSCollector::save_heap_summary() {
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   _last_heap_summary = gch->create_heap_summary();
!   _last_metaspace_summary = gch->create_metaspace_summary();
! }
!
! void CMSCollector::report_heap_summary(GCWhen::Type when) {
!   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
! }
!
! void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
    assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
      "Foreground collector should be waiting, not executing");
    assert(Thread::current()->is_VM_thread(), "A foreground collection"
      "may only be done by the VM Thread with the world stopped");
    assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
      "VM thread should have CMS token");

!   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
!     true, NULL);)
    if (UseAdaptiveSizePolicy) {
      size_policy()->ms_collection_begin();
    }
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
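Reviewer note: the foreground path can take over a cycle the background collector has already started, so register_gc_start()/register_gc_end() above are guarded by _cms_start_registered to make the start/end registration idempotent per cycle. A standalone sketch of that guard idiom, with CycleTrace and its methods as hypothetical stand-ins:

    // Sketch: two entry points may both try to open a trace; the flag
    // guarantees exactly one start and one end event per cycle.
    #include <cstdio>

    struct CycleTrace {
      bool _start_registered = false;

      void register_gc_start(const char* cause) {
        _start_registered = true;
        std::printf("cycle start (%s)\n", cause);
      }
      // Foreground entry: only opens the trace if background hasn't already.
      void register_foreground_gc_start(const char* cause) {
        if (!_start_registered) register_gc_start(cause);
      }
      // End is a no-op unless a matching start was seen.
      void register_gc_end() {
        if (_start_registered) {
          std::puts("cycle end");
          _start_registered = false;
        }
      }
    };

    int main() {
      CycleTrace t;
      t.register_gc_start("background");
      t.register_foreground_gc_start("foreground");  // ignored: already open
      t.register_gc_end();                           // closes exactly once
      t.register_gc_end();                           // ignored: no open cycle
    }

The same pattern explains save_heap_summary()/report_heap_summary(): the summary is captured at a safepoint and reported later, when the tracer is known to have an open cycle.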
*** 2432,2441 ****
--- 2503,2513 ----
      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
        Thread::current(), _collectorState);
    }
    switch (_collectorState) {
      case InitialMarking:
+       register_foreground_gc_start(cause);
        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
        checkpointRootsInitial(false);
        assert(_collectorState == Marking, "Collector state should have changed"
          " within checkpointRootsInitial()");
        break;
*** 2480,2489 ****
--- 2552,2562 ----
        // The heap has been resized.
        if (VerifyDuringGC &&
            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
          Universe::verify("Verify before reset: ");
        }
+       save_heap_summary();
        reset(false);
        assert(_collectorState == Idling, "Collector state should "
          "have changed");
        break;
      case Precleaning:
*** 3502,3511 ****
--- 3575,3587 ----
  void CMSCollector::checkpointRootsInitial(bool asynch) {
    assert(_collectorState == InitialMarking, "Wrong collector state");
    check_correct_thread_executing();
    TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());

+   save_heap_summary();
+   report_heap_summary(GCWhen::BeforeGC);
+
    ReferenceProcessor* rp = ref_processor();
    SpecializationStats::clear();
    assert(_restart_addr == NULL, "Control point invariant");
    if (asynch) {
      // acquire locks for subsequent manipulations
*** 3547,3558 ****
    // Setup the verification and class unloading state for this
    // CMS collection cycle.
    setup_cms_unloading_and_verification_state();

!   NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
!     PrintGCDetails && Verbose, true, gclog_or_tty);)
    if (UseAdaptiveSizePolicy) {
      size_policy()->checkpoint_roots_initial_begin();
    }

    // Reset all the PLAB chunk arrays if necessary.
--- 3623,3634 ----
    // Setup the verification and class unloading state for this
    // CMS collection cycle.
    setup_cms_unloading_and_verification_state();

!   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
!     PrintGCDetails && Verbose, true, _gc_timer_cm);)
    if (UseAdaptiveSizePolicy) {
      size_policy()->checkpoint_roots_initial_begin();
    }

    // Reset all the PLAB chunk arrays if necessary.
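Reviewer note: the recurring TraceTime-to-GCTraceTime substitution in this and the following hunks swaps a log-only scope for one that also registers the phase with a GCTimer; call sites that pass NULL keep log-only behavior. A standalone sketch of that RAII pattern, under the assumption that GCTraceTime behaves as a scoped timer; ScopedPhase and PhaseTimer here are hypothetical stand-ins:

    // Sketch: an RAII scope that always logs the phase on exit and,
    // when a timer is supplied, also records it; a null timer = log-only.
    #include <chrono>
    #include <cstdio>
    #include <vector>

    struct PhaseTimer {
      struct Phase { const char* name; double ms; };
      std::vector<Phase> phases;
    };

    class ScopedPhase {
      const char* _name;
      PhaseTimer* _timer;  // may be null: then we only log
      std::chrono::steady_clock::time_point _start;
    public:
      ScopedPhase(const char* name, PhaseTimer* timer)
          : _name(name), _timer(timer), _start(std::chrono::steady_clock::now()) {}
      ~ScopedPhase() {
        std::chrono::duration<double, std::milli> d =
            std::chrono::steady_clock::now() - _start;
        std::printf("[%s, %.3f ms]\n", _name, d.count());
        if (_timer != nullptr) _timer->phases.push_back({_name, d.count()});
      }
    };

    int main() {
      PhaseTimer timer;
      {
        ScopedPhase t("checkpointRootsInitialWork", &timer);  // logged and recorded
      }
      {
        ScopedPhase t("GC", nullptr);  // logged only, like the NULL-timer call sites
      }
      std::printf("recorded phases: %zu\n", timer.phases.size());
    }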
*** 4540,4551 ****
          // The following will yield to allow foreground
          // collection to proceed promptly. XXX YSR:
          // The code in this method may need further
          // tweaking for better performance and some restructuring
          // for cleaner interfaces.
          rp->preclean_discovered_references(
!           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl);
      }

      if (clean_survivor) {  // preclean the active survivor space(s)
        assert(_young_gen->kind() == Generation::DefNew ||
               _young_gen->kind() == Generation::ParNew ||
--- 4616,4629 ----
          // The following will yield to allow foreground
          // collection to proceed promptly. XXX YSR:
          // The code in this method may need further
          // tweaking for better performance and some restructuring
          // for cleaner interfaces.
+         GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
          rp->preclean_discovered_references(
!           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
!           gc_timer);
      }

      if (clean_survivor) {  // preclean the active survivor space(s)
        assert(_young_gen->kind() == Generation::DefNew ||
               _young_gen->kind() == Generation::ParNew ||
*** 4883,4894 ****
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set flag to false, GCH->do_collection will
      // expect it to be false and set to true
      FlagSetting fl(gch->_is_gc_active, false);
!     NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
!       PrintGCDetails && Verbose, true, gclog_or_tty);)
      int level = _cmsGen->level() - 1;
      if (level >= 0) {
        gch->do_collection(true,        // full (i.e. force, see below)
                           false,       // !clear_all_soft_refs
                           0,           // size
--- 4961,4972 ----
    if (CMSScavengeBeforeRemark) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      // Temporarily set flag to false, GCH->do_collection will
      // expect it to be false and set to true
      FlagSetting fl(gch->_is_gc_active, false);
!     NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
!       PrintGCDetails && Verbose, true, _gc_timer_cm);)
      int level = _cmsGen->level() - 1;
      if (level >= 0) {
        gch->do_collection(true,        // full (i.e. force, see below)
                           false,       // !clear_all_soft_refs
                           0,           // size
*** 4913,4923 ****
  }

  void CMSCollector::checkpointRootsFinalWork(bool asynch,
    bool clear_all_soft_refs, bool init_mark_was_synchronous) {

!   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)

    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());

    if (UseAdaptiveSizePolicy) {
--- 4991,5001 ----
  }

  void CMSCollector::checkpointRootsFinalWork(bool asynch,
    bool clear_all_soft_refs, bool init_mark_was_synchronous) {

!   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)

    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());

    if (UseAdaptiveSizePolicy) {
*** 4964,4978 ****
        // are detected via the mod union table which is the set of all cards
        // dirtied since the first checkpoint in this GC cycle and prior to
        // the most recent young generation GC, minus those cleaned up by the
        // concurrent precleaning.
        if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
!         TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
          do_remark_parallel();
        } else {
!         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
!           gclog_or_tty);
          do_remark_non_parallel();
        }
      }
    } else {
      assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
--- 5042,5056 ----
        // are detected via the mod union table which is the set of all cards
        // dirtied since the first checkpoint in this GC cycle and prior to
        // the most recent young generation GC, minus those cleaned up by the
        // concurrent precleaning.
        if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
!         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
          do_remark_parallel();
        } else {
!         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
!           _gc_timer_cm);
          do_remark_non_parallel();
        }
      }
    } else {
      assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
*** 4981,4991 ****
    }
    verify_work_stacks_empty();
    verify_overflow_empty();

    {
!     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
      refProcessingWork(asynch, clear_all_soft_refs);
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
--- 5059,5069 ----
    }
    verify_work_stacks_empty();
    verify_overflow_empty();

    {
!     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
      refProcessingWork(asynch, clear_all_soft_refs);
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
*** 5042,5051 ****
--- 5120,5131 ----
    if ((VerifyAfterGC || VerifyDuringGC) &&
        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
      verify_after_remark();
    }

+   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
+
    // Change under the freelistLocks.
    _collectorState = Sweeping;
    // Call isAllClear() under bitMapLock
    assert(_modUnionTable.isAllClear(),
      "Should be clear by end of the final marking");
*** 5695,5705 ****
    MarkFromDirtyCardsClosure
      markFromDirtyCardsClosure(this, _span,
                                NULL,  // space is set further below
                                &_markBitMap, &_markStack, &mrias_cl);
    {
!     TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
      // Iterate over the dirty cards, setting the corresponding bits in the
      // mod union table.
      {
        ModUnionClosure modUnionClosure(&_modUnionTable);
        _ct->ct_bs()->dirty_card_iterate(
--- 5775,5785 ----
    MarkFromDirtyCardsClosure
      markFromDirtyCardsClosure(this, _span,
                                NULL,  // space is set further below
                                &_markBitMap, &_markStack, &mrias_cl);
    {
!     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
      // Iterate over the dirty cards, setting the corresponding bits in the
      // mod union table.
      {
        ModUnionClosure modUnionClosure(&_modUnionTable);
        _ct->ct_bs()->dirty_card_iterate(
*** 5732,5742 ****
        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify();
    }
    {
!     TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);

      verify_work_stacks_empty();

      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
      GenCollectedHeap::StrongRootsScope srs(gch);
--- 5812,5822 ----
        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify();
    }
    {
!     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);

      verify_work_stacks_empty();

      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
      GenCollectedHeap::StrongRootsScope srs(gch);
*** 5754,5764 ****
           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
    }

    {
!     TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty);

      verify_work_stacks_empty();

      // Scan all class loader data objects that might have been introduced
      // during concurrent marking.
--- 5834,5844 ----
           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
    }

    {
!     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);

      verify_work_stacks_empty();

      // Scan all class loader data objects that might have been introduced
      // during concurrent marking.
*** 5773,5783 ****
      verify_work_stacks_empty();
    }

    {
!     TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty);

      verify_work_stacks_empty();

      RemarkKlassClosure remark_klass_closure(&mrias_cl);
      ClassLoaderDataGraph::classes_do(&remark_klass_closure);
--- 5853,5863 ----
      verify_work_stacks_empty();
    }

    {
!     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);

      verify_work_stacks_empty();

      RemarkKlassClosure remark_klass_closure(&mrias_cl);
      ClassLoaderDataGraph::classes_do(&remark_klass_closure);
*** 5975,5985 ****
                                    &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
      _span, &_markBitMap, &_markStack,
      &cmsKeepAliveClosure, false /* !preclean */);
    {
!     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
      if (rp->processing_is_mt()) {
        // Set the degree of MT here.  If the discovery is done MT, there
        // may have been a different number of threads doing the discovery
        // and a different number of discovered lists may have Ref objects.
        // That is OK as long as the Reference lists are balanced (see
--- 6055,6067 ----
                                    &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
      _span, &_markBitMap, &_markStack,
      &cmsKeepAliveClosure, false /* !preclean */);
    {
!     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
!
!     ReferenceProcessorStats stats;
      if (rp->processing_is_mt()) {
        // Set the degree of MT here.  If the discovery is done MT, there
        // may have been a different number of threads doing the discovery
        // and a different number of discovered lists may have Ref objects.
        // That is OK as long as the Reference lists are balanced (see
*** 5994,6021 ****
        // investigate.
        assert(active_workers > 0, "Should have been set during scavenge");
      }
      rp->set_active_mt_degree(active_workers);
      CMSRefProcTaskExecutor task_executor(*this);
!     rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
!                                       &task_executor);
    } else {
!     rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
!                                       NULL);
    }
  }

  // This is the point where the entire marking should have completed.
  verify_work_stacks_empty();

  if (should_unload_classes()) {
    {
!     TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
      // Unload classes and purge the SystemDictionary.
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Unload nmethods.
--- 6076,6107 ----
        // investigate.
        assert(active_workers > 0, "Should have been set during scavenge");
      }
      rp->set_active_mt_degree(active_workers);
      CMSRefProcTaskExecutor task_executor(*this);
!     stats = rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
!                                       &task_executor,
!                                       _gc_timer_cm);
    } else {
!     stats = rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
!                                       NULL,
!                                       _gc_timer_cm);
    }
+   _gc_tracer_cm->report_gc_reference_stats(stats);
+
  }

  // This is the point where the entire marking should have completed.
  verify_work_stacks_empty();

  if (should_unload_classes()) {
    {
!     GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
      // Unload classes and purge the SystemDictionary.
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Unload nmethods.
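Reviewer note: in the hunk above, process_discovered_references() now returns a ReferenceProcessorStats value that the collector forwards to its tracer, instead of the reference processor doing its own reporting. A standalone sketch of that flow; RefStats, Tracer, and process_discovered_references below are hypothetical stand-ins, and the counted kinds are illustrative only.

    // Sketch: reference processing returns per-kind counts; the caller
    // owns the tracer and decides when/whether to report them.
    #include <cstdio>

    struct RefStats {
      size_t soft = 0, weak = 0, final_refs = 0, phantom = 0;
    };

    struct Tracer {
      void report_gc_reference_stats(const RefStats& s) {
        std::printf("refs: soft=%zu weak=%zu final=%zu phantom=%zu\n",
                    s.soft, s.weak, s.final_refs, s.phantom);
      }
    };

    // Stand-in for rp->process_discovered_references(...): does the work
    // and returns the counts it processed.
    static RefStats process_discovered_references() {
      RefStats stats;
      stats.weak = 42;  // pretend 42 weak references were cleared
      return stats;
    }

    int main() {
      Tracer tracer;
      RefStats stats = process_discovered_references();
      tracer.report_gc_reference_stats(stats);
    }

Returning the stats keeps the processor free of tracing dependencies and lets both the MT and non-MT call paths share one reporting point.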
*** 6024,6043 ****
      // Prune dead klasses from subklass/sibling/implementor lists.
      Klass::clean_weak_klass_links(&_is_alive_closure);
    }

    {
!     TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
      // Clean up unreferenced symbols in symbol table.
      SymbolTable::unlink();
    }
  }

  // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
  // Need to check if we really scanned the StringTable.
  if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
!   TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
    // Delete entries for dead interned strings.
    StringTable::unlink(&_is_alive_closure);
  }

  // Restore any preserved marks as a result of mark stack or
--- 6110,6129 ----
      // Prune dead klasses from subklass/sibling/implementor lists.
      Klass::clean_weak_klass_links(&_is_alive_closure);
    }

    {
!     GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
      // Clean up unreferenced symbols in symbol table.
      SymbolTable::unlink();
    }
  }

  // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
  // Need to check if we really scanned the StringTable.
  if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
!   GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
    // Delete entries for dead interned strings.
    StringTable::unlink(&_is_alive_closure);
  }

  // Restore any preserved marks as a result of mark stack or
*** 6378,6393 ****
    NOT_PRODUCT(
      if (RotateCMSCollectionTypes) {
        _cmsGen->rotate_debug_collection_type();
      }
    )
  }

  void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    switch (op) {
      case CMS_op_checkpointRootsInitial: {
        SvcGCMarker sgcm(SvcGCMarker::OTHER);
--- 6464,6481 ----
    NOT_PRODUCT(
      if (RotateCMSCollectionTypes) {
        _cmsGen->rotate_debug_collection_type();
      }
    )
+
+   register_gc_end();
  }

  void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    switch (op) {
      case CMS_op_checkpointRootsInitial: {
        SvcGCMarker sgcm(SvcGCMarker::OTHER);