src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

*** 51,60 **** --- 51,61 ----
  #include "gc/shared/genOopClosures.inline.hpp"
  #include "gc/shared/isGCActiveMark.hpp"
  #include "gc/shared/referencePolicy.hpp"
  #include "gc/shared/strongRootsScope.hpp"
  #include "gc/shared/taskqueue.inline.hpp"
+ #include "logging/log.hpp"
  #include "memory/allocation.hpp"
  #include "memory/iterator.inline.hpp"
  #include "memory/padded.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/oop.inline.hpp"
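For readers unfamiliar with the Unified Logging API pulled in by the new logging/log.hpp include: the rest of this change uses two idioms, the one-shot log_<level>(tags)(...) macros and a LogHandle guard around more expensive, stream-based output. The sketch below is illustrative only; the function name, tag set and messages are placeholders and not part of the patch, and it simply mirrors the call shapes that appear in the hunks that follow.

```cpp
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"

// Hypothetical helper, for illustration only.
static void example_unified_logging(size_t cms_free) {
  // One-shot message; emitted only when the gc+stats tag set is enabled
  // at trace level (e.g. -Xlog:gc+stats=trace), otherwise essentially free.
  log_trace(gc, stats)("example: cms_free " SIZE_FORMAT, cms_free);

  // Guarded form: skip expensive formatting entirely when the level is
  // disabled, and hand print_on()-style code an outputStream* via
  // <level>_stream().
  LogHandle(gc, stats) log;
  if (log.is_trace()) {
    ResourceMark rm;
    log.trace_stream()->print_cr("example multi-line dump");
  }
}
```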
*** 364,380 ****
    // Apply a further correction factor which tries to adjust
    // for recent occurance of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;
!   if (PrintGCDetails && Verbose) {
!     gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
!       SIZE_FORMAT " expected_promotion " SIZE_FORMAT, cms_free, expected_promotion);
!     gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
-   }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
--- 365,378 ----
    // Apply a further correction factor which tries to adjust
    // for recent occurance of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;
!   log_trace(gc, stats)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT, cms_free, expected_promotion);
!   log_trace(gc, stats)(" cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
*** 399,414 ****
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
!   if (Verbose && PrintGCDetails) {
!     gclog_or_tty->print(
!       " CMSCollector: collect because of anticipated promotion "
!       "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
!       gc0_period(), time_until_cms_gen_full());
!   }
    return 0.0;
  }
  return work - deadline;
}
--- 397,408 ----
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
!   log_develop(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
!                   cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
  return work - deadline;
}
*** 666,700 **** void ConcurrentMarkSweepGeneration::print_statistics() { cmsSpace()->printFLCensus(0); } #endif - void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - if (PrintGCDetails) { - // I didn't want to change the logging when removing the level concept, - // but I guess this logging could say "old" or something instead of "1". - assert(gch->is_old_gen(this), - "The CMS generation should be the old generation"); - uint level = 1; - if (Verbose) { - gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]", - level, short_name(), s, used(), capacity()); - } else { - gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]", - level, short_name(), s, used() / K, capacity() / K); - } - } - if (Verbose) { - gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")", - gch->used(), gch->capacity()); - } else { - gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)", - gch->used() / K, gch->capacity() / K); - } - } - size_t ConcurrentMarkSweepGeneration::contiguous_available() const { // dld proposes an improvement in precision here. If the committed // part of the space ends in a free block we should add that to // uncommitted size in the calculation below. Will make this --- 660,669 ----
*** 714,738 ****
bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
! if (Verbose && PrintGCDetails) {
!   gclog_or_tty->print_cr(
!     "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
!     "max_promo(" SIZE_FORMAT ")",
!     res? "":" not", available, res? ">=":"<",
!     av_promo, max_promotion_in_bytes);
! }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
! if (CMSDumpAtPromotionFailure) {
!   cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
--- 683,704 ----
bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
! log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
!                          res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
! LogHandle(gc, promotion) log;
! if (log.is_trace()) {
!   ResourceMark rm;
!   cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
*** 784,814 ****
  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
!   if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
!     gclog_or_tty->print_cr("\nFrom compute_new_size: ");
!     gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
!     gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage);
!     gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage);
!     gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity() / 1000);
!     gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
!     gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000);
!     gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
!     gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
!     gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
!   if (PrintGCDetails && Verbose) {
!     gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity());
!   }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
--- 750,779 ----
  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
!   LogHandle(gc) log;
!   if (log.is_trace()) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
!     log.trace("From compute_new_size: ");
!     log.trace(" Free fraction %f", free_percentage);
!     log.trace(" Desired free fraction %f", desired_free_percentage);
!     log.trace(" Maximum free fraction %f", maximum_free_percentage);
!     log.trace(" Capacity " SIZE_FORMAT, capacity() / 1000);
!     log.trace(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
!     log.trace(" Young gen size " SIZE_FORMAT, young_size / 1000);
!     log.trace(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
!     log.trace(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
!     log.trace(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
!   log.trace(" Expanded free fraction %f", ((double) free()) / capacity());
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
*** 1142,1180 **** // hip and should be fixed by untying them. } bool CMSCollector::shouldConcurrentCollect() { if (_full_gc_requested) { ! if (Verbose && PrintGCDetails) { ! gclog_or_tty->print_cr("CMSCollector: collect because of explicit " ! " gc request (or gc_locker)"); ! } return true; } FreelistLocker x(this); // ------------------------------------------------------------------ // Print out lots of information which affects the initiation of // a collection. ! if (PrintCMSInitiationStatistics && stats().valid()) { ! gclog_or_tty->print("CMSCollector shouldConcurrentCollect: "); ! gclog_or_tty->stamp(); ! gclog_or_tty->cr(); ! stats().print_on(gclog_or_tty); ! gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f", ! stats().time_until_cms_gen_full()); ! gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free()); ! gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT, ! _cmsGen->contiguous_available()); ! gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); ! gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); ! gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); ! gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); ! gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin()); ! gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end()); ! gclog_or_tty->print_cr("metadata initialized %d", ! MetaspaceGC::should_concurrent_collect()); } // ------------------------------------------------------------------ // If the estimated time to complete a cms collection (cms_duration()) // is less than the estimated time remaining until the cms generation --- 1107,1139 ---- // hip and should be fixed by untying them. } bool CMSCollector::shouldConcurrentCollect() { if (_full_gc_requested) { ! log_trace(gc)("CMSCollector: collect because of explicit gc request (or gc_locker)"); return true; } FreelistLocker x(this); // ------------------------------------------------------------------ // Print out lots of information which affects the initiation of // a collection. ! LogHandle(gc, stats) log; ! if (log.is_debug() && stats().valid()) { ! log.debug("CMSCollector shouldConcurrentCollect: "); ! ResourceMark rm; ! stats().print_on(log.debug_stream()); ! log.debug("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full()); ! log.debug("free=" SIZE_FORMAT, _cmsGen->free()); ! log.debug("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available()); ! log.debug("promotion_rate=%g", stats().promotion_rate()); ! log.debug("cms_allocation_rate=%g", stats().cms_allocation_rate()); ! log.debug("occupancy=%3.7f", _cmsGen->occupancy()); ! log.debug("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); ! log.debug("cms_time_since_begin=%3.7f", stats().cms_time_since_begin()); ! log.debug("cms_time_since_end=%3.7f", stats().cms_time_since_end()); ! log.debug("metadata initialized %d", MetaspaceGC::should_concurrent_collect()); } // ------------------------------------------------------------------ // If the estimated time to complete a cms collection (cms_duration()) // is less than the estimated time remaining until the cms generation
*** 1188,1203 **** // We want to conservatively collect somewhat early in order // to try and "bootstrap" our CMS/promotion statistics; // this branch will not fire after the first successful CMS // collection because the stats should then be valid. if (_cmsGen->occupancy() >= _bootstrap_occupancy) { ! if (Verbose && PrintGCDetails) { ! gclog_or_tty->print_cr( ! " CMSCollector: collect for bootstrapping statistics:" ! " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(), ! _bootstrap_occupancy); ! } return true; } } } --- 1147,1158 ---- // We want to conservatively collect somewhat early in order // to try and "bootstrap" our CMS/promotion statistics; // this branch will not fire after the first successful CMS // collection because the stats should then be valid. if (_cmsGen->occupancy() >= _bootstrap_occupancy) { ! log_trace(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f", ! _cmsGen->occupancy(), _bootstrap_occupancy); return true; } } }
*** 1205,1237 **** // old gen want a collection cycle started. Each may use // an appropriate criterion for making this decision. // XXX We need to make sure that the gen expansion // criterion dovetails well with this. XXX NEED TO FIX THIS if (_cmsGen->should_concurrent_collect()) { ! if (Verbose && PrintGCDetails) { ! gclog_or_tty->print_cr("CMS old gen initiated"); ! } return true; } // We start a collection if we believe an incremental collection may fail; // this is not likely to be productive in practice because it's probably too // late anyway. GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(gch->collector_policy()->is_generation_policy(), "You may want to check the correctness of the following"); if (gch->incremental_collection_will_fail(true /* consult_young */)) { ! if (Verbose && PrintGCDetails) { ! gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); ! } return true; } if (MetaspaceGC::should_concurrent_collect()) { ! if (Verbose && PrintGCDetails) { ! gclog_or_tty->print("CMSCollector: collect for metadata allocation "); ! } return true; } // CMSTriggerInterval starts a CMS cycle if enough time has passed. if (CMSTriggerInterval >= 0) { --- 1160,1186 ---- // old gen want a collection cycle started. Each may use // an appropriate criterion for making this decision. // XXX We need to make sure that the gen expansion // criterion dovetails well with this. XXX NEED TO FIX THIS if (_cmsGen->should_concurrent_collect()) { ! log_trace(gc)("CMS old gen initiated"); return true; } // We start a collection if we believe an incremental collection may fail; // this is not likely to be productive in practice because it's probably too // late anyway. GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(gch->collector_policy()->is_generation_policy(), "You may want to check the correctness of the following"); if (gch->incremental_collection_will_fail(true /* consult_young */)) { ! log_trace(gc)("CMSCollector: collect because incremental collection will fail "); return true; } if (MetaspaceGC::should_concurrent_collect()) { ! log_trace(gc)("CMSCollector: collect for metadata allocation "); return true; } // CMSTriggerInterval starts a CMS cycle if enough time has passed. if (CMSTriggerInterval >= 0) {
*** 1241,1257 **** } // Check the CMS time since begin (we do not check the stats validity // as we want to be able to trigger the first CMS cycle as well) if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) { - if (Verbose && PrintGCDetails) { if (stats().valid()) { ! gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)", stats().cms_time_since_begin()); } else { ! gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)"); ! } } return true; } } --- 1190,1204 ---- } // Check the CMS time since begin (we do not check the stats validity // as we want to be able to trigger the first CMS cycle as well) if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) { if (stats().valid()) { ! log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)", stats().cms_time_since_begin()); } else { ! log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)"); } return true; } }
*** 1290,1313 **** // we will be out of sufficient free space given allocation rate estimates.] bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { assert_lock_strong(freelistLock()); if (occupancy() > initiating_occupancy()) { ! if (PrintGCDetails && Verbose) { ! gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", short_name(), occupancy(), initiating_occupancy()); - } return true; } if (UseCMSInitiatingOccupancyOnly) { return false; } if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { ! if (PrintGCDetails && Verbose) { ! gclog_or_tty->print(" %s: collect because expanded for allocation ", ! short_name()); ! } return true; } return false; } --- 1237,1255 ---- // we will be out of sufficient free space given allocation rate estimates.] bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { assert_lock_strong(freelistLock()); if (occupancy() > initiating_occupancy()) { ! log_trace(gc)(" %s: collect because of occupancy %f / %f ", short_name(), occupancy(), initiating_occupancy()); return true; } if (UseCMSInitiatingOccupancyOnly) { return false; } if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { ! log_trace(gc)(" %s: collect because expanded for allocation ", short_name()); return true; } return false; }
*** 1360,1376 ****
         GCCause::is_serviceability_requested_gc(cause);
}

void CMSCollector::report_concurrent_mode_interruption() {
  if (is_external_interruption()) {
!   if (PrintGCDetails) {
!     gclog_or_tty->print(" (concurrent mode interrupted)");
!   }
  } else {
!   if (PrintGCDetails) {
!     gclog_or_tty->print(" (concurrent mode failure)");
!   }
    _gc_tracer_cm->report_concurrent_mode_failure();
  }
}
--- 1302,1314 ----
         GCCause::is_serviceability_requested_gc(cause);
}

void CMSCollector::report_concurrent_mode_interruption() {
  if (is_external_interruption()) {
!   log_debug(gc)("Concurrent mode interrupted");
  } else {
!   log_debug(gc)("Concurrent mode failure");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }
}
*** 1500,1514 **** // The CMS_token is already held. Get back the other locks. assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "VM thread should have CMS token"); getFreelistLocks(); bitMapLock()->lock_without_safepoint_check(); ! if (TraceCMSState) { ! gclog_or_tty->print_cr("CMS foreground collector has asked for control " ! INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state); ! gclog_or_tty->print_cr(" gets control with state %d", _collectorState); ! } // Inform cms gen if this was due to partial collection failing. // The CMS gen may use this fact to determine its expansion policy. GenCollectedHeap* gch = GenCollectedHeap::heap(); if (gch->incremental_collection_will_fail(false /* don't consult_young */)) { --- 1438,1450 ---- // The CMS_token is already held. Get back the other locks. assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "VM thread should have CMS token"); getFreelistLocks(); bitMapLock()->lock_without_safepoint_check(); ! log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d", ! p2i(Thread::current()), first_state); ! log_debug(gc, state)(" gets control with state %d", _collectorState); // Inform cms gen if this was due to partial collection failing. // The CMS gen may use this fact to determine its expansion policy. GenCollectedHeap* gch = GenCollectedHeap::heap(); if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
*** 1578,1588 ****
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

! GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
--- 1514,1524 ----
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

! GCTraceTime(Trace, gc) t("CMS:MSC");

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
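This hunk shows the GCTraceTime conversion pattern reused in later hunks: the log level and tag set move into a macro argument, the boolean/stream constructor parameters disappear, and a GCTimer* can still be passed when the phase should also be recorded in the collector's timer. A minimal, illustrative sketch of the two shapes that appear in this patch (the scoping braces are placeholders for the surrounding phase code):

```cpp
// Illustrative only; mirrors the call sites converted in this file.
{
  // Timed scope logged under the (gc) tag at Trace level; no GCTimer.
  GCTraceTime(Trace, gc) t("CMS:MSC");
  // ... phase work ...
}
{
  // Same, but also recording the phase in the collector's GCTimer
  // (_gc_timer_cm, taken from a later hunk in this change).
  GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
  // ... phase work ...
}
```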
*** 1663,1699 **** // For a mark-sweep-compact, compute_new_size() will be called // in the heap's do_collection() method. } void CMSCollector::print_eden_and_survivor_chunk_arrays() { ContiguousSpace* eden_space = _young_gen->eden(); ContiguousSpace* from_space = _young_gen->from(); ContiguousSpace* to_space = _young_gen->to(); // Eden if (_eden_chunk_array != NULL) { ! gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", p2i(eden_space->bottom()), p2i(eden_space->top()), p2i(eden_space->end()), eden_space->capacity()); ! gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", " ! "_eden_chunk_capacity=" SIZE_FORMAT, _eden_chunk_index, _eden_chunk_capacity); for (size_t i = 0; i < _eden_chunk_index; i++) { ! gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, ! i, p2i(_eden_chunk_array[i])); } } // Survivor if (_survivor_chunk_array != NULL) { ! gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", p2i(from_space->bottom()), p2i(from_space->top()), p2i(from_space->end()), from_space->capacity()); ! gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", " ! "_survivor_chunk_capacity=" SIZE_FORMAT, _survivor_chunk_index, _survivor_chunk_capacity); for (size_t i = 0; i < _survivor_chunk_index; i++) { ! gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, ! i, p2i(_survivor_chunk_array[i])); } } } void CMSCollector::getFreelistLocks() const { --- 1599,1636 ---- // For a mark-sweep-compact, compute_new_size() will be called // in the heap's do_collection() method. } void CMSCollector::print_eden_and_survivor_chunk_arrays() { + LogHandle(gc, heap) log; + if (!log.is_trace()) { + return; + } + ContiguousSpace* eden_space = _young_gen->eden(); ContiguousSpace* from_space = _young_gen->from(); ContiguousSpace* to_space = _young_gen->to(); // Eden if (_eden_chunk_array != NULL) { ! log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", p2i(eden_space->bottom()), p2i(eden_space->top()), p2i(eden_space->end()), eden_space->capacity()); ! log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT, _eden_chunk_index, _eden_chunk_capacity); for (size_t i = 0; i < _eden_chunk_index; i++) { ! log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i])); } } // Survivor if (_survivor_chunk_array != NULL) { ! log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", p2i(from_space->bottom()), p2i(from_space->top()), p2i(from_space->end()), from_space->capacity()); ! log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT, _survivor_chunk_index, _survivor_chunk_capacity); for (size_t i = 0; i < _survivor_chunk_index; i++) { ! log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i])); } } } void CMSCollector::getFreelistLocks() const {
*** 1778,1806 **** // Signal that we are about to start a collection gch->increment_total_full_collections(); // ... starting a collection cycle _collection_count_start = gch->total_full_collections(); } ! // Used for PrintGC ! size_t prev_used = 0; ! if (PrintGC && Verbose) { ! prev_used = _cmsGen->used(); ! } // The change of the collection state is normally done at this level; // the exceptions are phases that are executed while the world is // stopped. For those phases the change of state is done while the // world is stopped. For baton passing purposes this allows the // background collector to finish the phase and change state atomically. // The foreground collector cannot wait on a phase that is done // while the world is stopped because the foreground collector already // has the world stopped and would deadlock. while (_collectorState != Idling) { ! if (TraceCMSState) { ! gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", p2i(Thread::current()), _collectorState); - } // The foreground collector // holds the Heap_lock throughout its collection. // holds the CMS token (but not the lock) // except while it is waiting for the background collector to yield. // --- 1715,1737 ---- // Signal that we are about to start a collection gch->increment_total_full_collections(); // ... starting a collection cycle _collection_count_start = gch->total_full_collections(); } ! size_t prev_used = _cmsGen->used(); // The change of the collection state is normally done at this level; // the exceptions are phases that are executed while the world is // stopped. For those phases the change of state is done while the // world is stopped. For baton passing purposes this allows the // background collector to finish the phase and change state atomically. // The foreground collector cannot wait on a phase that is done // while the world is stopped because the foreground collector already // has the world stopped and would deadlock. while (_collectorState != Idling) { ! log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d", p2i(Thread::current()), _collectorState); // The foreground collector // holds the Heap_lock throughout its collection. // holds the CMS token (but not the lock) // except while it is waiting for the background collector to yield. //
*** 1826,1840 **** if (waitForForegroundGC()) { // We yielded to a foreground GC, nothing more to be // done this round. assert(_foregroundGCShouldWait == false, "We set it to false in " "waitForForegroundGC()"); ! if (TraceCMSState) { ! gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT ! " exiting collection CMS state %d", p2i(Thread::current()), _collectorState); - } return; } else { // The background collector can run but check to see if the // foreground collector has done a collection while the // background collector was waiting to get the CGC_lock --- 1757,1768 ---- if (waitForForegroundGC()) { // We yielded to a foreground GC, nothing more to be // done this round. assert(_foregroundGCShouldWait == false, "We set it to false in " "waitForForegroundGC()"); ! log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d", p2i(Thread::current()), _collectorState); return; } else { // The background collector can run but check to see if the // foreground collector has done a collection while the // background collector was waiting to get the CGC_lock
*** 1934,1947 **** case Idling: default: ShouldNotReachHere(); break; } ! if (TraceCMSState) { ! gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", p2i(Thread::current()), _collectorState); - } assert(_foregroundGCShouldWait, "block post-condition"); } // Should this be in gc_epilogue? collector_policy()->counters()->update_counters(); --- 1862,1873 ---- case Idling: default: ShouldNotReachHere(); break; } ! log_debug(gc, state)(" Thread " INTPTR_FORMAT " done - next CMS state %d", p2i(Thread::current()), _collectorState); assert(_foregroundGCShouldWait, "block post-condition"); } // Should this be in gc_epilogue? collector_policy()->counters()->update_counters();
*** 1956,1973 **** CGC_lock->notify(); } assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Possible deadlock"); } ! if (TraceCMSState) { ! gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT ! " exiting collection CMS state %d", p2i(Thread::current()), _collectorState); ! } ! if (PrintGC && Verbose) { ! _cmsGen->print_heap_change(prev_used); ! } } void CMSCollector::register_gc_start(GCCause::Cause cause) { _cms_start_registered = true; _gc_timer_cm->register_gc_start(); --- 1882,1895 ---- CGC_lock->notify(); } assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Possible deadlock"); } ! log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d", p2i(Thread::current()), _collectorState); ! log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", ! prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K); } void CMSCollector::register_gc_start(GCCause::Cause cause) { _cms_start_registered = true; _gc_timer_cm->register_gc_start();
*** 2015,2040 **** ConcurrentMarkSweepThread::CMS_cms_has_token); ConcurrentMarkSweepThread::set_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_wants_token); // Get a possibly blocked foreground thread going CGC_lock->notify(); ! if (TraceCMSState) { ! gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", p2i(Thread::current()), _collectorState); - } while (_foregroundGCIsActive) { CGC_lock->wait(Mutex::_no_safepoint_check_flag); } ConcurrentMarkSweepThread::set_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_has_token); ConcurrentMarkSweepThread::clear_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_wants_token); } ! if (TraceCMSState) { ! gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", p2i(Thread::current()), _collectorState); - } return res; } // Because of the need to lock the free lists and other structures in // the collector, common to all the generations that the collector is --- 1937,1958 ---- ConcurrentMarkSweepThread::CMS_cms_has_token); ConcurrentMarkSweepThread::set_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_wants_token); // Get a possibly blocked foreground thread going CGC_lock->notify(); ! log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", p2i(Thread::current()), _collectorState); while (_foregroundGCIsActive) { CGC_lock->wait(Mutex::_no_safepoint_check_flag); } ConcurrentMarkSweepThread::set_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_has_token); ConcurrentMarkSweepThread::clear_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_wants_token); } ! log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", p2i(Thread::current()), _collectorState); return res; } // Because of the need to lock the free lists and other structures in // the collector, common to all the generations that the collector is
*** 2127,2141 **** cmsSpace()->gc_prologue(); // Clear stat counters NOT_PRODUCT( assert(_numObjectsPromoted == 0, "check"); assert(_numWordsPromoted == 0, "check"); ! if (Verbose && PrintGC) { ! gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, " ! SIZE_FORMAT " bytes concurrently", _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); - } _numObjectsAllocated = 0; _numWordsAllocated = 0; ) } --- 2045,2056 ---- cmsSpace()->gc_prologue(); // Clear stat counters NOT_PRODUCT( assert(_numObjectsPromoted == 0, "check"); assert(_numWordsPromoted == 0, "check"); ! log_develop(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently", _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); _numObjectsAllocated = 0; _numWordsAllocated = 0; ) }
*** 2208,2232 **** cmsSpace()->gc_epilogue(); // Print stat counters NOT_PRODUCT( assert(_numObjectsAllocated == 0, "check"); assert(_numWordsAllocated == 0, "check"); ! if (Verbose && PrintGC) { ! gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, " ! SIZE_FORMAT " bytes", _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); - } _numObjectsPromoted = 0; _numWordsPromoted = 0; ) - if (PrintGC && Verbose) { // Call down the chain in contiguous_available needs the freelistLock // so print this out before releasing the freeListLock. ! gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ", ! contiguous_available()); ! } } #ifndef PRODUCT bool CMSCollector::have_cms_token() { Thread* thr = Thread::current(); --- 2123,2141 ---- cmsSpace()->gc_epilogue(); // Print stat counters NOT_PRODUCT( assert(_numObjectsAllocated == 0, "check"); assert(_numWordsAllocated == 0, "check"); ! log_develop(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); _numObjectsPromoted = 0; _numWordsPromoted = 0; ) // Call down the chain in contiguous_available needs the freelistLock // so print this out before releasing the freeListLock. ! log_develop(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available()); } #ifndef PRODUCT bool CMSCollector::have_cms_token() { Thread* thr = Thread::current();
*** 2240,2283 **** } return false; } #endif - // Check reachability of the given heap address in CMS generation, - // treating all other generations as roots. - bool CMSCollector::is_cms_reachable(HeapWord* addr) { - // We could "guarantee" below, rather than assert, but I'll - // leave these as "asserts" so that an adventurous debugger - // could try this in the product build provided some subset of - // the conditions were met, provided they were interested in the - // results and knew that the computation below wouldn't interfere - // with other concurrent computations mutating the structures - // being read or written. - assert(SafepointSynchronize::is_at_safepoint(), - "Else mutations in object graph will make answer suspect"); - assert(have_cms_token(), "Should hold cms token"); - assert(haveFreelistLocks(), "must hold free list locks"); - assert_lock_strong(bitMapLock()); - - // Clear the marking bit map array before starting, but, just - // for kicks, first report if the given address is already marked - gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr), - _markBitMap.isMarked(addr) ? "" : " not"); - - if (verify_after_remark()) { - MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); - bool result = verification_mark_bm()->isMarked(addr); - gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr), - result ? "IS" : "is NOT"); - return result; - } else { - gclog_or_tty->print_cr("Could not compute result"); - return false; - } - } - - void CMSCollector::print_on_error(outputStream* st) { CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector; if (collector != NULL) { CMSBitMap* bitmap = &collector->_markBitMap; --- 2149,2158 ----
*** 2307,2328 **** VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} bool do_bit(size_t offset) { HeapWord* addr = _marks->offsetToHeapWord(offset); if (!_marks->isMarked(addr)) { ! oop(addr)->print_on(gclog_or_tty); ! gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); _failed = true; } return true; } bool failed() { return _failed; } }; ! bool CMSCollector::verify_after_remark(bool silent) { ! if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... "); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; assert(SafepointSynchronize::is_at_safepoint(), "Else mutations in object graph will make answer suspect"); --- 2182,2205 ---- VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} bool do_bit(size_t offset) { HeapWord* addr = _marks->offsetToHeapWord(offset); if (!_marks->isMarked(addr)) { ! LogHandle(gc, verify) log; ! ResourceMark rm; ! oop(addr)->print_on(log.info_stream()); ! log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); _failed = true; } return true; } bool failed() { return _failed; } }; ! bool CMSCollector::verify_after_remark() { ! GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking."); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; assert(SafepointSynchronize::is_at_safepoint(), "Else mutations in object graph will make answer suspect");
*** 2381,2391 **** verify_after_remark_work_2(); } else { warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant", CMSRemarkVerifyVariant); } - if (!silent) gclog_or_tty->print(" done] "); return true; } void CMSCollector::verify_after_remark_work_1() { ResourceMark rm; --- 2258,2267 ----
*** 2433,2444 **** // verification_mark_bm() is also marked in markBitMap(); flag all // errors by printing corresponding objects. VerifyMarkedClosure vcl(markBitMap()); verification_mark_bm()->iterate(&vcl); if (vcl.failed()) { ! gclog_or_tty->print("Verification failed"); ! gch->print_on(gclog_or_tty); fatal("CMS: failed marking verification after remark"); } } class VerifyKlassOopsKlassClosure : public KlassClosure { --- 2309,2322 ---- // verification_mark_bm() is also marked in markBitMap(); flag all // errors by printing corresponding objects. VerifyMarkedClosure vcl(markBitMap()); verification_mark_bm()->iterate(&vcl); if (vcl.failed()) { ! LogHandle(gc, verify) log; ! log.info("Verification failed"); ! ResourceMark rm; ! gch->print_on(log.info_stream()); fatal("CMS: failed marking verification after remark"); } } class VerifyKlassOopsKlassClosure : public KlassClosure {
*** 2727,2740 ****
  // remember why we expanded; this information is used
  // by shouldConcurrentCollect() when making decisions on whether to start
  // a new CMS cycle.
  if (success) {
    set_expansion_cause(cause);
!   if (PrintGCDetails && Verbose) {
!     gclog_or_tty->print_cr("Expanded CMS gen for %s",
!       CMSExpansionCause::to_string(cause));
!   }
  }
}

HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
--- 2605,2615 ----
  // remember why we expanded; this information is used
  // by shouldConcurrentCollect() when making decisions on whether to start
  // a new CMS cycle.
  if (success) {
    set_expansion_cause(cause);
!   log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause));
  }
}

HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
  HeapWord* res = NULL;
*** 2798,2876 **** } void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); ! if (PrintGCDetails && Verbose) { ! warning("Shrinking of CMS not yet implemented"); ! } return; } // Simple ctor/dtor wrapper for accounting & timer chores around concurrent // phases. class CMSPhaseAccounting: public StackObj { public: CMSPhaseAccounting(CMSCollector *collector, ! const char *phase, ! bool print_cr = true); ~CMSPhaseAccounting(); private: CMSCollector *_collector; ! const char *_phase; ! elapsedTimer _wallclock; ! bool _print_cr; public: // Not MT-safe; so do not pass around these StackObj's // where they may be accessed by other threads. jlong wallclock_millis() { ! assert(_wallclock.is_active(), "Wall clock should not stop"); ! _wallclock.stop(); // to record time ! jlong ret = _wallclock.milliseconds(); ! _wallclock.start(); // restart ! return ret; } }; CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, ! const char *phase, ! bool print_cr) : ! _collector(collector), _phase(phase), _print_cr(print_cr) { - if (PrintCMSStatistics != 0) { _collector->resetYields(); - } - if (PrintGCDetails) { - gclog_or_tty->gclog_stamp(); - gclog_or_tty->print_cr("[%s-concurrent-%s-start]", - _collector->cmsGen()->short_name(), _phase); - } _collector->resetTimer(); - _wallclock.start(); _collector->startTimer(); } CMSPhaseAccounting::~CMSPhaseAccounting() { - assert(_wallclock.is_active(), "Wall clock should not have stopped"); _collector->stopTimer(); ! _wallclock.stop(); ! if (PrintGCDetails) { ! gclog_or_tty->gclog_stamp(); ! gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", ! _collector->cmsGen()->short_name(), ! _phase, _collector->timerValue(), _wallclock.seconds()); ! if (_print_cr) { ! gclog_or_tty->cr(); ! } ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase, ! _collector->yields()); ! } ! } } // CMS work // The common parts of CMSParInitialMarkTask and CMSParRemarkTask. --- 2673,2722 ---- } void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); ! log_trace(gc)("Shrinking of CMS not yet implemented"); return; } // Simple ctor/dtor wrapper for accounting & timer chores around concurrent // phases. class CMSPhaseAccounting: public StackObj { public: CMSPhaseAccounting(CMSCollector *collector, ! const char *title); ~CMSPhaseAccounting(); private: CMSCollector *_collector; ! const char *_title; ! GCTraceConcTime(Info, gc) _trace_time; public: // Not MT-safe; so do not pass around these StackObj's // where they may be accessed by other threads. jlong wallclock_millis() { ! return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time()); } }; CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, ! const char *title) : ! _collector(collector), _title(title), _trace_time(title) { _collector->resetYields(); _collector->resetTimer(); _collector->startTimer(); } CMSPhaseAccounting::~CMSPhaseAccounting() { _collector->stopTimer(); ! jlong end_counter = os::elapsed_counter(); ! log_debug(gc)("Concurrent active time: %.3fms", _collector->timerValue()); ! log_debug(gc, stats)(" (CMS %s yielded %d times)", _title, _collector->yields()); } // CMS work // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
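The rewritten CMSPhaseAccounting above is used the same way at every concurrent phase later in this change (mark, preclean, abortable preclean): construct it on the stack next to a GCTraceCPUTime and let the destructor report active time and yield counts. A minimal sketch of that call-site shape, with a placeholder phase title; the surrounding CMSCollector method and locking are assumed, not shown in this hunk:

```cpp
// Illustrative fragment of a CMSCollector concurrent phase.
{
  GCTraceCPUTime tcpu;                               // CPU time accounting for the scope
  CMSPhaseAccounting pa(this, "Concurrent Example"); // starts GCTraceConcTime and the CMS timer
  // ... concurrent work; long-running loops can bound themselves with
  //     pa.wallclock_millis(), as the abortable preclean loop below does ...
}   // ~CMSPhaseAccounting stops the timer and logs active time and yield count
```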
*** 2933,2944 ****
  // Setup the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

! NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
!   PrintGCDetails && Verbose, true, _gc_timer_cm);)

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }
--- 2779,2789 ----
  // Setup the verification and class unloading state for this
  // CMS collection cycle.
  setup_cms_unloading_and_verification_state();

! GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);

  // Reset all the PLAB chunk arrays if necessary.
  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
    reset_survivor_plab_arrays();
  }
*** 2965,2977 ****
  // Whenever a CLD is found, it will be claimed before proceeding to mark
  // the klasses. The claimed marks need to be cleared before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

- if (CMSPrintEdenSurvivorChunks) {
    print_eden_and_survivor_chunk_arrays();
- }

  {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTableDeactivate dpt_deact;
#endif
--- 2810,2820 ----
*** 3038,3058 ****
    // Weak ref discovery note: We may be discovering weak
    // refs in this generation concurrent (but interleaved) with
    // weak ref discovery by the young generation collector.
    CMSTokenSyncWithLocks ts(true, bitMapLock());
!   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
    bool res = markFromRootsWork();
    if (res) {
      _collectorState = Precleaning;
    } else { // We failed and a foreground collection wants to take over
      assert(_foregroundGCIsActive, "internal state inconsistency");
      assert(_restart_addr == NULL, "foreground will restart from scratch");
!     if (PrintGCDetails) {
!       gclog_or_tty->print_cr("bailing out to foreground collection");
!     }
    }
  verify_overflow_empty();
  return res;
}
--- 2881,2899 ----
    // Weak ref discovery note: We may be discovering weak
    // refs in this generation concurrent (but interleaved) with
    // weak ref discovery by the young generation collector.
    CMSTokenSyncWithLocks ts(true, bitMapLock());
!   GCTraceCPUTime tcpu;
!   CMSPhaseAccounting pa(this, "Concurrent Mark");
    bool res = markFromRootsWork();
    if (res) {
      _collectorState = Precleaning;
    } else { // We failed and a foreground collection wants to take over
      assert(_foregroundGCIsActive, "internal state inconsistency");
      assert(_restart_addr == NULL, "foreground will restart from scratch");
!     log_debug(gc)("bailing out to foreground collection");
    }
  verify_overflow_empty();
  return res;
}
*** 3253,3278 **** assert(work_queue(worker_id)->size() == 0, "Expected to be empty"); // Scan the bitmap covering _cms_space, tracing through grey objects. _timer.start(); do_scan_and_mark(worker_id, _cms_space); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! // XXX: need xxx/xxx type of notation, two timers ! } // ... do work stealing _timer.reset(); _timer.start(); do_work_steal(worker_id); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! // XXX: need xxx/xxx type of notation, two timers ! } assert(_collector->_markStack.isEmpty(), "Should have been emptied"); assert(work_queue(worker_id)->size() == 0, "Should have been emptied"); // Note that under the current task protocol, the // following assertion is true even of the spaces // expanded since the completion of the concurrent --- 3094,3111 ---- assert(work_queue(worker_id)->size() == 0, "Expected to be empty"); // Scan the bitmap covering _cms_space, tracing through grey objects. _timer.start(); do_scan_and_mark(worker_id, _cms_space); _timer.stop(); ! log_debug(gc, stats)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds()); // ... do work stealing _timer.reset(); _timer.start(); do_work_steal(worker_id); _timer.stop(); ! log_debug(gc, stats)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); assert(_collector->_markStack.isEmpty(), "Should have been emptied"); assert(work_queue(worker_id)->size() == 0, "Should have been emptied"); // Note that under the current task protocol, the // following assertion is true even of the spaces // expanded since the completion of the concurrent
*** 3483,3496 **** } ) if (simulate_overflow || !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " ! SIZE_FORMAT, _overflow_stack->capacity()); ! } // We cannot assert that the overflow stack is full because // it may have been emptied since. assert(simulate_overflow || _work_queue->size() == _work_queue->max_elems(), "Else push should have succeeded"); --- 3316,3326 ---- } ) if (simulate_overflow || !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow ! log_debug(gc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity()); // We cannot assert that the overflow stack is full because // it may have been emptied since. assert(simulate_overflow || _work_queue->size() == _work_queue->max_elems(), "Else push should have succeeded");
*** 3571,3583 **** // serve our purpose. XXX assert_lock_strong(_bit_map_lock); _bit_map_lock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // It is possible for whichever thread initiated the yield request // not to get a chance to wake up and take the bitmap lock between // this thread releasing it and reacquiring it. So, while the // should_yield() flag is on, let's sleep for a bit to give the --- 3401,3411 ----
*** 3735,3746 ****
                             * CMSScheduleRemarkEdenPenetration)) {
      _start_sampling = true;
    } else {
      _start_sampling = false;
    }
!   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  }
  CMSTokenSync x(true); // is cms thread
  if (CMSPrecleaningEnabled) {
    sample_eden();
--- 3563,3574 ----
                             * CMSScheduleRemarkEdenPenetration)) {
      _start_sampling = true;
    } else {
      _start_sampling = false;
    }
!   GCTraceCPUTime tcpu;
!   CMSPhaseAccounting pa(this, "Concurrent Preclean");
    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
  }
  CMSTokenSync x(true); // is cms thread
  if (CMSPrecleaningEnabled) {
    sample_eden();
*** 3764,3775 ****
  // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  // we will never do an actual abortable preclean cycle.
  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
!   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!   CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
    // We need more smarts in the abortable preclean
    // loop below to deal with cases where allocation
    // in young gen is very very slow, and our precleaning
    // is running a losing race against a horde of
    // mutators intent on flooding us with CMS updates
--- 3592,3603 ----
  // past the next scavenge in an effort to
  // schedule the pause as described above. By choosing
  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
  // we will never do an actual abortable preclean cycle.
  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
!   GCTraceCPUTime tcpu;
!   CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
    // We need more smarts in the abortable preclean
    // loop below to deal with cases where allocation
    // in young gen is very very slow, and our precleaning
    // is running a losing race against a horde of
    // mutators intent on flooding us with CMS updates
*** 3787,3805 ****
      loops++;
      // Voluntarily terminate abortable preclean phase if we have
      // been at it for too long.
      if ((CMSMaxAbortablePrecleanLoops != 0) && loops >= CMSMaxAbortablePrecleanLoops) {
!       if (PrintGCDetails) {
!         gclog_or_tty->print(" CMS: abort preclean due to loops ");
!       }
        break;
      }
      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
!       if (PrintGCDetails) {
!         gclog_or_tty->print(" CMS: abort preclean due to time ");
!       }
        break;
      }
      // If we are doing little work each iteration, we should
      // take a short break.
      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
--- 3615,3629 ----
      loops++;
      // Voluntarily terminate abortable preclean phase if we have
      // been at it for too long.
      if ((CMSMaxAbortablePrecleanLoops != 0) && loops >= CMSMaxAbortablePrecleanLoops) {
!       log_debug(gc)(" CMS: abort preclean due to loops ");
        break;
      }
      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
!       log_debug(gc)(" CMS: abort preclean due to time ");
        break;
      }
      // If we are doing little work each iteration, we should
      // take a short break.
      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
*** 3808,3822 **** cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); startTimer(); waited++; } } ! if (PrintCMSStatistics > 0) { ! gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ", loops, waited, cumworkdone); } - } CMSTokenSync x(true); // is cms thread if (_collectorState != Idling) { assert(_collectorState == AbortablePreclean, "Spontaneous state transition?"); _collectorState = FinalMarking; --- 3632,3644 ---- cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); startTimer(); waited++; } } ! log_debug(gc, stats)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ", loops, waited, cumworkdone); } CMSTokenSync x(true); // is cms thread if (_collectorState != Idling) { assert(_collectorState == AbortablePreclean, "Spontaneous state transition?"); _collectorState = FinalMarking;
*** 3955,3967 ****
  size_t numIter, cumNumCards, lastNumCards, curNumCards;
  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
       numIter < CMSPrecleanIter;
       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
    curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
!   if (Verbose && PrintGCDetails) {
!     gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
!   }
    // Either there are very few dirty cards, so re-mark
    // pause will be small anyway, or our pre-cleaning isn't
    // that much faster than the rate at which cards are being
    // dirtied, so we might as well stop and re-mark since
    // precleaning won't improve our re-mark time by much.
--- 3777,3787 ----
  size_t numIter, cumNumCards, lastNumCards, curNumCards;
  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
       numIter < CMSPrecleanIter;
       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
    curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
!   log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
    // Either there are very few dirty cards, so re-mark
    // pause will be small anyway, or our pre-cleaning isn't
    // that much faster than the rate at which cards are being
    // dirtied, so we might as well stop and re-mark since
    // precleaning won't improve our re-mark time by much.
*** 3977,3990 ****
  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  cumNumCards += curNumCards;
! if (PrintGCDetails && PrintCMSStatistics != 0) {
!   gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)", curNumCards, cumNumCards, numIter);
- }
  return cumNumCards;   // as a measure of useful work done
}

// PRECLEANING NOTES:
// Precleaning involves:
--- 3797,3808 ----
  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
  cumNumCards += curNumCards;
! log_debug(gc, stats)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)", curNumCards, cumNumCards, numIter);
  return cumNumCards;   // as a measure of useful work done
}

// PRECLEANING NOTES:
// Precleaning involves:
*** 4234,4256 **** TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); verify_work_stacks_empty(); verify_overflow_empty(); ! if (PrintGCDetails) { ! gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]", ! _young_gen->used() / K, ! _young_gen->capacity() / K); ! } { if (CMSScavengeBeforeRemark) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Temporarily set flag to false, GCH->do_collection will // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); ! NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", ! PrintGCDetails && Verbose, true, _gc_timer_cm);) gch->do_collection(true, // full (i.e. force, see below) false, // !clear_all_soft_refs 0, // size false, // is_tlab GenCollectedHeap::YoungGen // type --- 4052,4072 ---- TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause()); verify_work_stacks_empty(); verify_overflow_empty(); ! log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)", ! _young_gen->used() / K, _young_gen->capacity() / K); { if (CMSScavengeBeforeRemark) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Temporarily set flag to false, GCH->do_collection will // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); ! ! GCTraceTime(Trace, gc) tm("Scavenge Before Remark", _gc_timer_cm); ! gch->do_collection(true, // full (i.e. force, see below) false, // !clear_all_soft_refs 0, // size false, // is_tlab GenCollectedHeap::YoungGen // type
*** 4264,4274 ****
  verify_work_stacks_empty();
  verify_overflow_empty();
}

void CMSCollector::checkpointRootsFinalWork() {
! NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  ResourceMark rm;
--- 4080,4090 ----
  verify_work_stacks_empty();
  verify_overflow_empty();
}

void CMSCollector::checkpointRootsFinalWork() {
! GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);

  assert(haveFreelistLocks(), "must have free list locks");
  assert_lock_strong(bitMapLock());

  ResourceMark rm;
*** 4296,4308 **** // so here just in case a scavenge did not happen. gch->ensure_parsability(false); // fill TLAB's, but no need to retire them // Update the saved marks which may affect the root scans. gch->save_marks(); - if (CMSPrintEdenSurvivorChunks) { print_eden_and_survivor_chunk_arrays(); - } { #if defined(COMPILER2) || INCLUDE_JVMCI DerivedPointerTableDeactivate dpt_deact; #endif --- 4112,4122 ----
*** 4316,4337 **** // are detected via the mod union table which is the set of all cards // dirtied since the first checkpoint in this GC cycle and prior to // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. if (CMSParallelRemarkEnabled) { ! GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm); do_remark_parallel(); } else { ! GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm); do_remark_non_parallel(); } } verify_work_stacks_empty(); verify_overflow_empty(); { ! NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);) refProcessingWork(); } verify_work_stacks_empty(); verify_overflow_empty(); --- 4130,4151 ---- // are detected via the mod union table which is the set of all cards // dirtied since the first checkpoint in this GC cycle and prior to // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. if (CMSParallelRemarkEnabled) { ! GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm); do_remark_parallel(); } else { ! GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm); do_remark_non_parallel(); } } verify_work_stacks_empty(); verify_overflow_empty(); { ! GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm); refProcessingWork(); } verify_work_stacks_empty(); verify_overflow_empty();
*** 4346,4388 **** // recurrence of that condition. assert(_markStack.isEmpty(), "No grey objects"); size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + _ser_kac_ovflw + _ser_kac_preclean_ovflw; if (ser_ovflw > 0) { ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("Marking stack overflow (benign) " ! "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ! ", kac_preclean=" SIZE_FORMAT ")", ! _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, ! _ser_kac_ovflw, _ser_kac_preclean_ovflw); ! } _markStack.expand(); _ser_pmc_remark_ovflw = 0; _ser_pmc_preclean_ovflw = 0; _ser_kac_preclean_ovflw = 0; _ser_kac_ovflw = 0; } if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("Work queue overflow (benign) " ! "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")", _par_pmc_remark_ovflw, _par_kac_ovflw); - } _par_pmc_remark_ovflw = 0; _par_kac_ovflw = 0; } - if (PrintCMSStatistics != 0) { if (_markStack._hit_limit > 0) { ! gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")", _markStack._hit_limit); } if (_markStack._failed_double > 0) { ! gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT ")," ! " current capacity " SIZE_FORMAT, ! _markStack._failed_double, ! _markStack.capacity()); ! } } _markStack._hit_limit = 0; _markStack._failed_double = 0; if ((VerifyAfterGC || VerifyDuringGC) && --- 4160,4190 ---- // recurrence of that condition. assert(_markStack.isEmpty(), "No grey objects"); size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + _ser_kac_ovflw + _ser_kac_preclean_ovflw; if (ser_ovflw > 0) { ! log_debug(gc, stats)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")", ! _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw); _markStack.expand(); _ser_pmc_remark_ovflw = 0; _ser_pmc_preclean_ovflw = 0; _ser_kac_preclean_ovflw = 0; _ser_kac_ovflw = 0; } if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { ! log_debug(gc, stats)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")", _par_pmc_remark_ovflw, _par_kac_ovflw); _par_pmc_remark_ovflw = 0; _par_kac_ovflw = 0; } if (_markStack._hit_limit > 0) { ! log_debug(gc, stats)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")", _markStack._hit_limit); } if (_markStack._failed_double > 0) { ! log_debug(gc, stats)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT, ! _markStack._failed_double, _markStack.capacity()); } _markStack._hit_limit = 0; _markStack._failed_double = 0; if ((VerifyAfterGC || VerifyDuringGC) &&
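Editor's note: the pattern throughout these remark/rescan hunks is that an explicit "if (PrintCMSStatistics != 0) { gclog_or_tty->print_cr(...); }" guard collapses into a single unified-logging call, because the log_debug/log_trace macros first check whether the given level and tag set are enabled. A condensed before/after sketch using only names taken from the hunk above:

    // Old style: manual flag check plus direct tty printing.
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
                             _par_pmc_remark_ovflw, _par_kac_ovflw);
    }

    // New style: the enabled-check is folded into the macro; output goes to
    // the unified log under the (gc, stats) tags at debug level.
    log_debug(gc, stats)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
                         _par_pmc_remark_ovflw, _par_kac_ovflw);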
*** 4413,4427 **** // ---------- young gen roots -------------- { work_on_young_gen_roots(worker_id, &par_mri_cl); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished young gen initial mark scan work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } } // ---------- remaining roots -------------- _timer.reset(); _timer.start(); --- 4215,4225 ---- // ---------- young gen roots -------------- { work_on_young_gen_roots(worker_id, &par_mri_cl); _timer.stop(); ! log_debug(gc, stats)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // ---------- remaining roots -------------- _timer.reset(); _timer.start();
*** 4438,4452 **** &cld_closure); assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished remaining root initial mark scan work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } } // Parallel remark task class CMSParRemarkTask: public CMSParMarkTask { CompactibleFreeListSpace* _cms_space; --- 4236,4246 ---- &cld_closure); assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); ! log_debug(gc, stats)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // Parallel remark task class CMSParRemarkTask: public CMSParMarkTask { CompactibleFreeListSpace* _cms_space;
*** 4555,4569 **** // work first. // ---------- young gen roots -------------- { work_on_young_gen_roots(worker_id, &par_mrias_cl); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished young gen rescan work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } } // ---------- remaining roots -------------- _timer.reset(); _timer.start(); --- 4349,4359 ---- // work first. // ---------- young gen roots -------------- { work_on_young_gen_roots(worker_id, &par_mrias_cl); _timer.stop(); ! log_debug(gc, stats)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // ---------- remaining roots -------------- _timer.reset(); _timer.start();
*** 4578,4592 **** assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished remaining root rescan work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } // ---------- unhandled CLD scanning ---------- if (worker_id == 0) { // Single threaded at the moment. _timer.reset(); _timer.start(); --- 4368,4378 ---- assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); ! log_debug(gc, stats)("Finished remaining root rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); // ---------- unhandled CLD scanning ---------- if (worker_id == 0) { // Single threaded at the moment. _timer.reset(); _timer.start();
*** 4601,4615 **** // We don't need to keep track of new CLDs anymore. ClassLoaderDataGraph::remember_new_clds(false); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished unhandled CLD scanning work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } } // ---------- dirty klass scanning ---------- if (worker_id == 0) { // Single threaded at the moment. _timer.reset(); --- 4387,4397 ---- // We don't need to keep track of new CLDs anymore. ClassLoaderDataGraph::remember_new_clds(false); _timer.stop(); ! log_debug(gc, stats)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // ---------- dirty klass scanning ---------- if (worker_id == 0) { // Single threaded at the moment. _timer.reset();
*** 4618,4632 **** // Scan all classes that was dirtied during the concurrent marking phase. RemarkKlassClosure remark_klass_closure(&par_mrias_cl); ClassLoaderDataGraph::classes_do(&remark_klass_closure); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished dirty klass scanning work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } } // We might have added oops to ClassLoaderData::_handles during the // concurrent marking phase. These oops point to newly allocated objects // that are guaranteed to be kept alive. Either by the direct allocation --- 4400,4410 ---- // Scan all classes that was dirtied during the concurrent marking phase. RemarkKlassClosure remark_klass_closure(&par_mrias_cl); ClassLoaderDataGraph::classes_do(&remark_klass_closure); _timer.stop(); ! log_debug(gc, stats)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // We might have added oops to ClassLoaderData::_handles during the // concurrent marking phase. These oops point to newly allocated objects // that are guaranteed to be kept alive. Either by the direct allocation
*** 4640,4666 **** // Do the rescan tasks for each of the two spaces // (cms_space) in turn. // "worker_id" is passed to select the task_queue for "worker_id" do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished dirty card rescan work in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } // ---------- steal work from other threads ... // ---------- ... and drain overflow list. _timer.reset(); _timer.start(); do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id)); _timer.stop(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr( ! "Finished work stealing in %dth thread: %3.3f sec", ! worker_id, _timer.seconds()); ! } } // Note that parameter "i" is not used. void CMSParMarkTask::do_young_space_rescan(uint worker_id, --- 4418,4436 ---- // Do the rescan tasks for each of the two spaces // (cms_space) in turn. // "worker_id" is passed to select the task_queue for "worker_id" do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl); _timer.stop(); ! log_debug(gc, stats)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); // ---------- steal work from other threads ... // ---------- ... and drain overflow list. _timer.reset(); _timer.start(); do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id)); _timer.stop(); ! log_debug(gc, stats)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // Note that parameter "i" is not used. void CMSParMarkTask::do_young_space_rescan(uint worker_id,
*** 4850,4864 **** // Loop around, finish this work, and try to steal some more } else if (terminator()->offer_termination()) { break; // nirvana from the infinite cycle } } ! NOT_PRODUCT( ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); ! } ! ) assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), "Else our work is not yet done"); } // Record object boundaries in _eden_chunk_array by sampling the eden --- 4620,4630 ---- // Loop around, finish this work, and try to steal some more } else if (terminator()->offer_termination()) { break; // nirvana from the infinite cycle } } ! log_develop(gc, stats)("\t(%d: stole %d oops)", i, num_steals); assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), "Else our work is not yet done"); } // Record object boundaries in _eden_chunk_array by sampling the eden
*** 4951,4963 **** _survivor_chunk_array[i] = min_val; _cursor[min_tid]++; } // We are all done; record the size of the _survivor_chunk_array _survivor_chunk_index = i; // exclusive: [0, i) ! if (PrintCMSStatistics > 0) { ! gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i); ! } // Verify that we used up all the recorded entries #ifdef ASSERT size_t total = 0; for (int j = 0; j < no_of_gc_threads; j++) { assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); --- 4717,4727 ---- _survivor_chunk_array[i] = min_val; _cursor[min_tid]++; } // We are all done; record the size of the _survivor_chunk_array _survivor_chunk_index = i; // exclusive: [0, i) ! log_debug(gc, stats)(" (Survivor:" SIZE_FORMAT "chunks) ", i); // Verify that we used up all the recorded entries #ifdef ASSERT size_t total = 0; for (int j = 0; j < no_of_gc_threads; j++) { assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
*** 4965,4978 **** } assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); // Check that the merged array is in sorted order if (total > 0) { for (size_t i = 0; i < total - 1; i++) { ! if (PrintCMSStatistics > 0) { ! gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", i, p2i(_survivor_chunk_array[i])); - } assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], "Not sorted"); } } #endif // ASSERT --- 4729,4740 ---- } assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); // Check that the merged array is in sorted order if (total > 0) { for (size_t i = 0; i < total - 1; i++) { ! log_develop(gc, stats)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", i, p2i(_survivor_chunk_array[i])); assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], "Not sorted"); } } #endif // ASSERT
*** 5102,5112 **** MarkFromDirtyCardsClosure markFromDirtyCardsClosure(this, _span, NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { ! GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { ModUnionClosure modUnionClosure(&_modUnionTable); _ct->ct_bs()->dirty_card_iterate( --- 4864,4874 ---- MarkFromDirtyCardsClosure markFromDirtyCardsClosure(this, _span, NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { ! GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { ModUnionClosure modUnionClosure(&_modUnionTable); _ct->ct_bs()->dirty_card_iterate(
*** 5127,5149 **** HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); MemRegion cms_span(lb, ub); _modUnionTable.dirty_range_iterate_clear(cms_span, &markFromDirtyCardsClosure); verify_work_stacks_empty(); ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", ! markFromDirtyCardsClosure.num_dirty_cards()); ! } } } if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(); } { ! GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. StrongRootsScope srs(1); --- 4889,4908 ---- HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); MemRegion cms_span(lb, ub); _modUnionTable.dirty_range_iterate_clear(cms_span, &markFromDirtyCardsClosure); verify_work_stacks_empty(); ! log_debug(gc, stats)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards()); } } if (VerifyDuringGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { HandleMark hm; // Discard invalid handles created during verification Universe::verify(); } { ! GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm); verify_work_stacks_empty(); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. StrongRootsScope srs(1);
*** 5161,5171 **** || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); } { ! GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); // Scan all class loader data objects that might have been introduced // during concurrent marking. --- 4920,4930 ---- || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); } { ! GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm); verify_work_stacks_empty(); // Scan all class loader data objects that might have been introduced // during concurrent marking.
*** 5180,5190 **** verify_work_stacks_empty(); } { ! GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm); verify_work_stacks_empty(); RemarkKlassClosure remark_klass_closure(&mrias_cl); ClassLoaderDataGraph::classes_do(&remark_klass_closure); --- 4939,4949 ---- verify_work_stacks_empty(); } { ! GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm); verify_work_stacks_empty(); RemarkKlassClosure remark_klass_closure(&mrias_cl); ClassLoaderDataGraph::classes_do(&remark_klass_closure);
*** 5342,5356 **** // Loop around, finish this work, and try to steal some more } else if (terminator()->offer_termination()) { break; // nirvana from the infinite cycle } } ! NOT_PRODUCT( ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); ! } ! ) } void CMSRefProcTaskExecutor::execute(ProcessTask& task) { GenCollectedHeap* gch = GenCollectedHeap::heap(); --- 5101,5111 ---- // Loop around, finish this work, and try to steal some more } else if (terminator()->offer_termination()) { break; // nirvana from the infinite cycle } } ! log_develop(gc, stats)("\t(%d: stole %d oops)", i, num_steals); } void CMSRefProcTaskExecutor::execute(ProcessTask& task) { GenCollectedHeap* gch = GenCollectedHeap::heap();
*** 5388,5398 **** &_markStack, false /* !preclean */); CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { ! GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm); ReferenceProcessorStats stats; if (rp->processing_is_mt()) { // Set the degree of MT here. If the discovery is done MT, there // may have been a different number of threads doing the discovery --- 5143,5153 ---- &_markStack, false /* !preclean */); CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { ! GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm); ReferenceProcessorStats stats; if (rp->processing_is_mt()) { // Set the degree of MT here. If the discovery is done MT, there // may have been a different number of threads doing the discovery
*** 5430,5440 **** // This is the point where the entire marking should have completed. verify_work_stacks_empty(); if (should_unload_classes()) { { ! GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm); // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); // Unload nmethods. --- 5185,5195 ---- // This is the point where the entire marking should have completed. verify_work_stacks_empty(); if (should_unload_classes()) { { ! GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm); // Unload classes and purge the SystemDictionary. bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); // Unload nmethods.
*** 5443,5459 **** // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&_is_alive_closure); } { ! GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } { ! GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm); // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } } --- 5198,5214 ---- // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(&_is_alive_closure); } { ! GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } { ! GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm); // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } }
*** 5516,5527 **** assert(!_intra_sweep_timer.is_active(), "Should not be active"); _intra_sweep_timer.reset(); _intra_sweep_timer.start(); { ! TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); ! CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); // First sweep the old gen { CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), bitMapLock()); sweepWork(_cmsGen); --- 5271,5282 ---- assert(!_intra_sweep_timer.is_active(), "Should not be active"); _intra_sweep_timer.reset(); _intra_sweep_timer.start(); { ! GCTraceCPUTime tcpu; ! CMSPhaseAccounting pa(this, "Concurrent Sweep"); // First sweep the old gen { CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), bitMapLock()); sweepWork(_cmsGen);
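Editor's note: for the concurrent phases, the old TraceCPUTime(PrintGCDetails, ...) plus CMSPhaseAccounting pair becomes a GCTraceCPUTime plus a CMSPhaseAccounting that now carries a human-readable phase title. A sketch of the scope structure as it appears in the sweep and reset hunks; what GCTraceCPUTime reports beyond standing in for TraceCPUTime is not spelled out by this patch.

    {
      GCTraceCPUTime tcpu;                              // CPU-time reporting for this scope
      CMSPhaseAccounting pa(this, "Concurrent Sweep");  // phase accounting, named for the log
      // ... sweep the old generation ...
    } // both helpers report as the scope ends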
*** 5600,5616 **** largestAddr = _cmsSpace->end(); } size_t largestOffset = pointer_delta(largestAddr, minAddr); size_t nearLargestOffset = (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; ! if (PrintFLSStatistics != 0) { ! gclog_or_tty->print_cr( ! "CMS: Large Block: " PTR_FORMAT ";" ! " Proximity: " PTR_FORMAT " -> " PTR_FORMAT, ! p2i(largestAddr), ! p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset)); ! } _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); } bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { return addr >= _cmsSpace->nearLargestChunk(); --- 5355,5366 ---- largestAddr = _cmsSpace->end(); } size_t largestOffset = pointer_delta(largestAddr, minAddr); size_t nearLargestOffset = (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; ! log_debug(gc, freelist, stats)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT, ! p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset)); _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); } bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { return addr >= _cmsSpace->nearLargestChunk();
*** 5700,5711 **** return; } // Clear the mark bitmap (no grey objects to start with) // for the next cycle. ! TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); ! CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); HeapWord* curAddr = _markBitMap.startWord(); while (curAddr < _markBitMap.endWord()) { size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr); MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining)); --- 5450,5461 ---- return; } // Clear the mark bitmap (no grey objects to start with) // for the next cycle. ! GCTraceCPUTime tcpu; ! CMSPhaseAccounting cmspa(this, "Concurrent Reset"); HeapWord* curAddr = _markBitMap.startWord(); while (curAddr < _markBitMap.endWord()) { size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr); MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
*** 5717,5729 **** "CMS thread should hold CMS token"); assert_lock_strong(bitMapLock()); bitMapLock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); stopTimer(); - if (PrintCMSStatistics != 0) { incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { --- 5467,5477 ----
*** 5756,5784 **** _collectorState = Idling; register_gc_end(); } void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { ! TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); ! GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); TraceCollectorStats tcs(counters()); switch (op) { case CMS_op_checkpointRootsInitial: { SvcGCMarker sgcm(SvcGCMarker::OTHER); checkpointRootsInitial(); - if (PrintGC) { - _cmsGen->printOccupancy("initial-mark"); - } break; } case CMS_op_checkpointRootsFinal: { SvcGCMarker sgcm(SvcGCMarker::OTHER); checkpointRootsFinal(); - if (PrintGC) { - _cmsGen->printOccupancy("remark"); - } break; } default: fatal("No such CMS_op"); } --- 5504,5527 ---- _collectorState = Idling; register_gc_end(); } void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { ! GCTraceCPUTime tcpu; TraceCollectorStats tcs(counters()); switch (op) { case CMS_op_checkpointRootsInitial: { + GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true); SvcGCMarker sgcm(SvcGCMarker::OTHER); checkpointRootsInitial(); break; } case CMS_op_checkpointRootsFinal: { + GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true); SvcGCMarker sgcm(SvcGCMarker::OTHER); checkpointRootsFinal(); break; } default: fatal("No such CMS_op"); }
*** 5987,5999 **** // For now we take the expedient path of just disabling the // messages for the problematic case.) void CMSMarkStack::expand() { assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); if (_capacity == MarkStackSizeMax) { ! if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { // We print a warning message only once per CMS cycle. ! gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); } return; } // Double capacity if possible size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax); --- 5730,5742 ---- // For now we take the expedient path of just disabling the // messages for the problematic case.) void CMSMarkStack::expand() { assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); if (_capacity == MarkStackSizeMax) { ! if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) { // We print a warning message only once per CMS cycle. ! log_debug(gc)(" (benign) Hit CMSMarkStack max size limit"); } return; } // Double capacity if possible size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
*** 6009,6023 **** fatal("Not enough swap for expanded marking stack"); } _base = (oop*)(_virtual_space.low()); _index = 0; _capacity = new_capacity; ! } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { // Failed to double capacity, continue; // we print a detail message only once per CMS cycle. ! gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " ! SIZE_FORMAT "K", _capacity / K, new_capacity / K); } } --- 5752,5765 ---- fatal("Not enough swap for expanded marking stack"); } _base = (oop*)(_virtual_space.low()); _index = 0; _capacity = new_capacity; ! } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) { // Failed to double capacity, continue; // we print a detail message only once per CMS cycle. ! log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K", _capacity / K, new_capacity / K); } }
*** 6091,6102 **** assert(obj->is_oop(), "expected an oop"); HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr)) { _verification_bm->mark(addr); if (!_cms_bm->isMarked(addr)) { ! oop(addr)->print(); ! gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); fatal("... aborting"); } } } --- 5833,5846 ---- assert(obj->is_oop(), "expected an oop"); HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr)) { _verification_bm->mark(addr); if (!_cms_bm->isMarked(addr)) { ! LogHandle(gc, verify) log; ! ResourceMark rm; ! oop(addr)->print_on(log.info_stream()); ! log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); fatal("... aborting"); } } }
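Editor's note: where the old verification-failure path printed the oop straight to gclog_or_tty, the new code obtains a LogHandle for the (gc, verify) tags and streams the object dump through its info-level outputStream; the added ResourceMark is presumably there because print_on() can allocate resource-area memory. A condensed restatement of the hunk above, with comments:

    LogHandle(gc, verify) log;                // handle bound to the (gc, verify) tag set
    ResourceMark rm;                          // print_on() may allocate in the resource area
    oop(addr)->print_on(log.info_stream());   // multi-line object dump at info level
    log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
    fatal("... aborting");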
*** 6188,6200 **** // relinquish the free_list_lock and bitMaplock() _bit_map->lock()->unlock(); _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && --- 5932,5942 ----
*** 6346,6358 **** // relinquish the free_list_lock and bitMaplock() _bitMap->lock()->unlock(); _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { --- 6088,6098 ----
*** 6415,6427 **** assert_lock_strong(_bit_map->lock()); // Relinquish the bit map lock _bit_map->lock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { --- 6155,6165 ----
*** 6570,6582 **** "CMS thread should hold CMS token"); assert_lock_strong(_bitMap->lock()); _bitMap->lock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { --- 6308,6318 ----
*** 6878,6898 **** HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { // Oop lies in _span and isn't yet grey or black _verification_bm->mark(addr); // now grey if (!_cms_bm->isMarked(addr)) { ! oop(addr)->print(); ! gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", ! p2i(addr)); fatal("... aborting"); } if (!_mark_stack->push(obj)) { // stack overflow ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " ! SIZE_FORMAT, _mark_stack->capacity()); ! } assert(_mark_stack->isFull(), "Else push should have succeeded"); handle_stack_overflow(addr); } // anything including and to the right of _finger // will be scanned as we iterate over the remainder of the --- 6614,6632 ---- HeapWord* addr = (HeapWord*)obj; if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { // Oop lies in _span and isn't yet grey or black _verification_bm->mark(addr); // now grey if (!_cms_bm->isMarked(addr)) { ! LogHandle(gc, verify) log; ! ResourceMark rm; ! oop(addr)->print_on(log.info_stream()); ! log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); fatal("... aborting"); } if (!_mark_stack->push(obj)) { // stack overflow ! log_debug(gc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity()); assert(_mark_stack->isFull(), "Else push should have succeeded"); handle_stack_overflow(addr); } // anything including and to the right of _finger // will be scanned as we iterate over the remainder of the
*** 6988,7001 **** // simulate a stack overflow simulate_overflow = true; } ) if (simulate_overflow || !_markStack->push(obj)) { // stack overflow ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " ! SIZE_FORMAT, _markStack->capacity()); ! } assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); handle_stack_overflow(addr); } } // anything including and to the right of _finger --- 6722,6732 ---- // simulate a stack overflow simulate_overflow = true; } ) if (simulate_overflow || !_markStack->push(obj)) { // stack overflow ! log_debug(gc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity()); assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); handle_stack_overflow(addr); } } // anything including and to the right of _finger
*** 7040,7053 **** } ) if (simulate_overflow || !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow ! if (PrintCMSStatistics != 0) { ! gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " ! SIZE_FORMAT, _overflow_stack->capacity()); ! } // We cannot assert that the overflow stack is full because // it may have been emptied since. assert(simulate_overflow || _work_queue->size() == _work_queue->max_elems(), "Else push should have succeeded"); --- 6771,6781 ---- } ) if (simulate_overflow || !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow ! log_debug(gc, stats)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity()); // We cannot assert that the overflow stack is full because // it may have been emptied since. assert(simulate_overflow || _work_queue->size() == _work_queue->max_elems(), "Else push should have succeeded");
*** 7205,7217 **** bml->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { --- 6933,6943 ----
*** 7238,7251 **** // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, // "mr should be a range of cards"); // However, that would be too strong in one case -- the last // partition ends at _unallocated_block which, in general, can be // an arbitrary boundary, not necessarily card aligned. ! if (PrintCMSStatistics != 0) { ! _num_dirty_cards += ! mr.word_size()/CardTableModRefBS::card_size_in_words; ! } _space->object_iterate_mem(mr, &_scan_cl); } SweepClosure::SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, --- 6964,6974 ---- // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, // "mr should be a range of cards"); // However, that would be too strong in one case -- the last // partition ends at _unallocated_block which, in general, can be // an arbitrary boundary, not necessarily card aligned. ! _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words; _space->object_iterate_mem(mr, &_scan_cl); } SweepClosure::SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
*** 7274,7287 **** _sp->initializeIndexedFreeListArrayReturnedBytes(); _sp->dictionary()->initialize_dict_returned_bytes(); ) assert(_limit >= _sp->bottom() && _limit <= _sp->end(), "sweep _limit out of bounds"); ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT, ! p2i(_limit)); ! } } void SweepClosure::print_on(outputStream* st) const { tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")", p2i(_sp->bottom()), p2i(_sp->end())); --- 6997,7008 ---- _sp->initializeIndexedFreeListArrayReturnedBytes(); _sp->dictionary()->initialize_dict_returned_bytes(); ) assert(_limit >= _sp->bottom() && _limit <= _sp->end(), "sweep _limit out of bounds"); ! log_develop(gc, sweep)("===================="); ! log_develop(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit)); } void SweepClosure::print_on(outputStream* st) const { tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")", p2i(_sp->bottom()), p2i(_sp->end()));
*** 7304,7349 **** if (inFreeRange()) { warning("inFreeRange() should have been reset; dumping state of SweepClosure"); print(); ShouldNotReachHere(); } ! if (Verbose && PrintGC) { ! gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); ! gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects, " ! SIZE_FORMAT " bytes " ! "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", ! _numObjectsLive, _numWordsLive*sizeof(HeapWord), ! _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); ! size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) ! * sizeof(HeapWord); ! gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes); ! if (PrintCMSStatistics && CMSVerifyReturnedBytes) { size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes(); size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes; ! gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes); ! gclog_or_tty->print(" Indexed List Returned " SIZE_FORMAT " bytes", ! indexListReturnedBytes); ! gclog_or_tty->print_cr(" Dictionary Returned " SIZE_FORMAT " bytes", ! dict_returned_bytes); ! } ! } ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================", ! p2i(_limit)); } } #endif // PRODUCT void SweepClosure::initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists) { ! if (CMSTraceSweeper) { ! gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n", p2i(freeFinger), freeRangeInFreeLists); - } assert(!inFreeRange(), "Trampling existing free range"); set_inFreeRange(true); set_lastFreeRangeCoalesced(false); set_freeFinger(freeFinger); --- 7025,7060 ---- if (inFreeRange()) { warning("inFreeRange() should have been reset; dumping state of SweepClosure"); print(); ShouldNotReachHere(); } ! ! if (log_is_enabled(Debug, gc, sweep)) { ! log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); ! log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", ! _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); ! size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord); ! log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes); ! } ! if (log_is_enabled(Debug, gc, stats) && CMSVerifyReturnedBytes) { size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes(); size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes; ! log_debug(gc, stats)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes", ! returned_bytes, indexListReturnedBytes, dict_returned_bytes); } + log_develop(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit)); + log_develop(gc, sweep)("================"); } #endif // PRODUCT void SweepClosure::initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists) { ! 
log_develop(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)", p2i(freeFinger), freeRangeInFreeLists); assert(!inFreeRange(), "Trampling existing free range"); set_inFreeRange(true); set_lastFreeRangeCoalesced(false); set_freeFinger(freeFinger);
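Editor's note: the sweep-statistics hunk above shows the two remaining conversion patterns in this file: wrapping a group of computations in log_is_enabled(...) so the totals are only produced when the output would actually be emitted, and replacing CMSTraceSweeper / NOT_PRODUCT tracing with log_develop, which, judging by what it replaces, is intended for non-product builds only. A condensed sketch reusing names from the hunk:

    // Compute and print the sweep totals only if (gc, sweep) is enabled at debug level.
    if (log_is_enabled(Debug, gc, sweep)) {
      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
      log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
    }

    // Develop-only tracing, formerly guarded by CMSTraceSweeper.
    log_develop(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));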
*** 7405,7422 **** if (inFreeRange()) { assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit, "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); ! if (CMSTraceSweeper) { ! gclog_or_tty->print("Sweep: last chunk: "); ! gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") " ! "[coalesced:%d]\n", p2i(freeFinger()), pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced() ? 1 : 0); } - } // help the iterator loop finish return pointer_delta(_sp->end(), addr); } --- 7116,7129 ---- if (inFreeRange()) { assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit, "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); ! log_develop(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]", p2i(freeFinger()), pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced() ? 1 : 0); } // help the iterator loop finish return pointer_delta(_sp->end(), addr); }
*** 7622,7634 **** assert((HeapWord*)fc <= _limit, "sweep invariant"); if (CMSTestInFreeList && fcInFreeLists) { assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists"); } ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize); ! } HeapWord* const fc_addr = (HeapWord*) fc; bool coalesce = false; const size_t left = pointer_delta(fc_addr, freeFinger()); --- 7329,7339 ---- assert((HeapWord*)fc <= _limit, "sweep invariant"); if (CMSTestInFreeList && fcInFreeLists) { assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists"); } ! log_develop(gc, sweep)(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize); HeapWord* const fc_addr = (HeapWord*) fc; bool coalesce = false; const size_t left = pointer_delta(fc_addr, freeFinger());
*** 7725,7744 **** " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")" " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")", p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size); if (eob >= _limit) { assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit"); ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block " "[" PTR_FORMAT "," PTR_FORMAT ") in space " "[" PTR_FORMAT "," PTR_FORMAT ")", p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end())); - } // Return the storage we are tracking back into the free lists. ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr("Flushing ... "); ! } assert(freeFinger() < eob, "Error"); flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger())); } } --- 7430,7445 ---- " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")" " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")", p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size); if (eob >= _limit) { assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit"); ! log_develop(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block " "[" PTR_FORMAT "," PTR_FORMAT ") in space " "[" PTR_FORMAT "," PTR_FORMAT ")", p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end())); // Return the storage we are tracking back into the free lists. ! log_develop(gc, sweep)("Flushing ... "); assert(freeFinger() < eob, "Error"); flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger())); } }
*** 7751,7776 **** FreeChunk* fc = (FreeChunk*) chunk; fc->set_size(size); assert(!_sp->verify_chunk_in_free_list(fc), "chunk should not be in free lists yet"); } ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", ! p2i(chunk), size); ! } // A new free range is going to be starting. The current // free range has not been added to the free lists yet or // was removed so add it back. // If the current free range was coalesced, then the death // of the free range was recorded. Record a birth now. if (lastFreeRangeCoalesced()) { _sp->coalBirth(size); } _sp->addChunkAndRepairOffsetTable(chunk, size, lastFreeRangeCoalesced()); ! } else if (CMSTraceSweeper) { ! gclog_or_tty->print_cr("Already in free list: nothing to flush"); } set_inFreeRange(false); set_freeRangeInFreeLists(false); } --- 7452,7474 ---- FreeChunk* fc = (FreeChunk*) chunk; fc->set_size(size); assert(!_sp->verify_chunk_in_free_list(fc), "chunk should not be in free lists yet"); } ! log_develop(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size); // A new free range is going to be starting. The current // free range has not been added to the free lists yet or // was removed so add it back. // If the current free range was coalesced, then the death // of the free range was recorded. Record a birth now. if (lastFreeRangeCoalesced()) { _sp->coalBirth(size); } _sp->addChunkAndRepairOffsetTable(chunk, size, lastFreeRangeCoalesced()); ! } else { ! log_develop(gc, sweep)("Already in free list: nothing to flush"); } set_inFreeRange(false); set_freeRangeInFreeLists(false); }
*** 7797,7809 **** "CMS thread should hold CMS token"); _bitMap->lock()->unlock(); _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { _collector->incrementYields(); - } // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { --- 7495,7505 ----
*** 7824,7837 **** return debug_cms_space->verify_chunk_in_free_list(fc); } #endif void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const { ! if (CMSTraceSweeper) { ! gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), fc->size()); - } } // CMSIsAliveClosure bool CMSIsAliveClosure::do_object_b(oop obj) { HeapWord* addr = (HeapWord*)obj; --- 7520,7531 ---- return debug_cms_space->verify_chunk_in_free_list(fc); } #endif void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const { ! log_develop(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), fc->size()); } // CMSIsAliveClosure bool CMSIsAliveClosure::do_object_b(oop obj) { HeapWord* addr = (HeapWord*)obj;