
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 7318 : imported patch foreground
rev 7319 : [mq]: foreground-review-stefank
rev 7320 : [mq]: foreground-review-kim

*** 190,200 ****
       ReservedSpace rs, size_t initial_byte_size, int level,
       CardTableRS* ct, bool use_adaptive_freelists,
       FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    CardGeneration(rs, initial_byte_size, level, ct),
    _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
!   _debug_collection_type(Concurrent_collection_type),
    _did_compact(false)
  {
    HeapWord* bottom = (HeapWord*) _virtual_space.low();
    HeapWord* end    = (HeapWord*) _virtual_space.high();
--- 190,200 ----
       ReservedSpace rs, size_t initial_byte_size, int level,
       CardTableRS* ct, bool use_adaptive_freelists,
       FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    CardGeneration(rs, initial_byte_size, level, ct),
    _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
!   _debug_concurrent_cycle(true),
    _did_compact(false)
  {
    HeapWord* bottom = (HeapWord*) _virtual_space.low();
    HeapWord* end    = (HeapWord*) _virtual_space.high();
*** 610,621 ****
    _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  
    // Clip CMSBootstrapOccupancy between 0 and 100.
    _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
  
-   _full_gcs_since_conc_gc = 0;
- 
    // Now tell CMS generations the identity of their collector
    ConcurrentMarkSweepGeneration::set_collector(this);
  
    // Create & start a CMS thread for this CMS collector
    _cmsThread = ConcurrentMarkSweepThread::start(this);
--- 610,619 ----
*** 1246,1265 ****
    }
    return true;
  }
  
  // For debugging purposes, change the type of collection.
! // If the rotation is not on the concurrent collection
! // type, don't start a concurrent collection.
  NOT_PRODUCT(
!   if (RotateCMSCollectionTypes &&
!       (_cmsGen->debug_collection_type() !=
!        ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
!     assert(_cmsGen->debug_collection_type() !=
!            ConcurrentMarkSweepGeneration::Unknown_collection_type,
!            "Bad cms collection type");
!     return false;
    }
  )
  
  FreelistLocker x(this);
  // ------------------------------------------------------------------
--- 1244,1257 ----
    }
    return true;
  }
  
  // For debugging purposes, change the type of collection.
! // Rotate between concurrent and stop-the-world full GCs.
  NOT_PRODUCT(
!   if (RotateCMSCollectionTypes) {
!     return _cmsGen->debug_concurrent_cycle();
    }
  )
  
  FreelistLocker x(this);
  // ------------------------------------------------------------------
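Review note: the hunk above collapses the old three-valued CollectionTypes rotation into a single boolean. A minimal standalone sketch, not part of the patch (field and method names mirror the diff; the rest is illustrative), of the resulting debug behavior:

    #include <cstdio>

    struct DebugRotation {
      bool _debug_concurrent_cycle;   // replaces the old CollectionTypes enum

      DebugRotation() : _debug_concurrent_cycle(true) {}

      // Mirrors the NOT_PRODUCT branch above: under RotateCMSCollectionTypes,
      // the flag alone decides whether the next full GC runs concurrently.
      bool should_concurrent_collect() const { return _debug_concurrent_cycle; }

      // Mirrors rotate_debug_collection_type(): flip after each collection.
      void rotate() { _debug_concurrent_cycle = !_debug_concurrent_cycle; }
    };

    int main() {
      DebugRotation r;
      for (int i = 0; i < 4; i++) {
        std::printf("full GC %d: %s\n", i,
                    r.should_concurrent_collect() ? "concurrent cycle"
                                                  : "stop-the-world mark-sweep-compact");
        r.rotate();
      }
      return 0;
    }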
*** 1439,1458 ****
  void CMSCollector::collect(bool   full,
                             bool   clear_all_soft_refs,
                             size_t size,
                             bool   tlab)
  {
-   if (!UseCMSCollectionPassing && _collectorState > Idling) {
-     // For debugging purposes skip the collection if the state
-     // is not currently idle
-     if (TraceCMSState) {
-       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
-         Thread::current(), full, _collectorState);
-     }
-     return;
-   }
- 
    // The following "if" branch is present for defensive reasons.
    // In the current uses of this interface, it can be replaced with:
    // assert(!GC_locker.is_active(), "Can't be called otherwise");
    // But I am not placing that assert here to allow future
    // generality in invoking this interface.
--- 1431,1440 ----
*** 1464,1474 ****
      // Need the free list locks for the call to free() in compute_new_size()
      compute_new_size();
      return;
    }
    acquire_control_and_collect(full, clear_all_soft_refs);
-   _full_gcs_since_conc_gc++;
  }
  
  void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    unsigned int gc_count = gch->total_full_collections();
--- 1446,1455 ----
*** 1634,1668 ****
      gclog_or_tty->print_cr("CMS foreground collector has asked for control "
                             INTPTR_FORMAT " with first state %d",
                             Thread::current(), first_state);
      gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
    }
  
-   // Check if we need to do a compaction, or if not, whether
-   // we need to start the mark-sweep from scratch.
-   bool should_compact    = false;
-   bool should_start_over = false;
-   decide_foreground_collection_type(clear_all_soft_refs,
-     &should_compact, &should_start_over);
- 
-   NOT_PRODUCT(
-     if (RotateCMSCollectionTypes) {
-       if (_cmsGen->debug_collection_type() ==
-           ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
-         should_compact = true;
-       } else if (_cmsGen->debug_collection_type() ==
-                  ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
-         should_compact = false;
-       }
-     }
-   )
- 
    if (first_state > Idling) {
      report_concurrent_mode_interruption();
    }
  
!   set_did_compact(should_compact);
!   if (should_compact) {
      // If the collection is being acquired from the background
      // collector, there may be references on the discovered
      // references lists that have NULL referents (being those
      // that were concurrently cleared by a mutator) or
      // that are no longer active (having been enqueued concurrently
--- 1615,1630 ----
      gclog_or_tty->print_cr("CMS foreground collector has asked for control "
                             INTPTR_FORMAT " with first state %d",
                             Thread::current(), first_state);
      gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
    }
  
    if (first_state > Idling) {
      report_concurrent_mode_interruption();
    }
  
!   set_did_compact(true);
! 
    // If the collection is being acquired from the background
    // collector, there may be references on the discovered
    // references lists that have NULL referents (being those
    // that were concurrently cleared by a mutator) or
    // that are no longer active (having been enqueued concurrently
*** 1690,1703 ****
                             _cmsGen->max_capacity(),
                             max_eden_size,
                             full,
                             gc_cause,
                             gch->collector_policy());
!   } else {
!     do_mark_sweep_work(clear_all_soft_refs, first_state,
!       should_start_over);
!   }
    // Reset the expansion cause, now that we just completed
    // a collection cycle.
    clear_expansion_cause();
    _foregroundGCIsActive = false;
    return;
--- 1652,1662 ----
                             _cmsGen->max_capacity(),
                             max_eden_size,
                             full,
                             gc_cause,
                             gch->collector_policy());
! 
    // Reset the expansion cause, now that we just completed
    // a collection cycle.
    clear_expansion_cause();
    _foregroundGCIsActive = false;
    return;
*** 1711,1782 ****
    FreelistLocker z(this);
    MetaspaceGC::compute_new_size();
    _cmsGen->compute_new_size_free_list();
  }
  
- // A work method used by foreground collection to determine
- // what type of collection (compacting or not, continuing or fresh)
- // it should do.
- // NOTE: the intent is to make UseCMSCompactAtFullCollection
- // and CMSCompactWhenClearAllSoftRefs the default in the future
- // and do away with the flags after a suitable period.
- void CMSCollector::decide_foreground_collection_type(
-   bool clear_all_soft_refs, bool* should_compact,
-   bool* should_start_over) {
-   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-   // flag is set, and we have either requested a System.gc() or
-   // the number of full gc's since the last concurrent cycle
-   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-   // or if an incremental collection has failed
-   GenCollectedHeap* gch = GenCollectedHeap::heap();
-   assert(gch->collector_policy()->is_generation_policy(),
-          "You may want to check the correctness of the following");
-   // Inform cms gen if this was due to partial collection failing.
-   // The CMS gen may use this fact to determine its expansion policy.
-   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
-     assert(!_cmsGen->incremental_collection_failed(),
-            "Should have been noticed, reacted to and cleared");
-     _cmsGen->set_incremental_collection_failed();
-   }
-   *should_compact =
-     UseCMSCompactAtFullCollection &&
-     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-      GCCause::is_user_requested_gc(gch->gc_cause()) ||
-      gch->incremental_collection_will_fail(true /* consult_young */));
-   *should_start_over = false;
-   if (clear_all_soft_refs && !*should_compact) {
-     // We are about to do a last ditch collection attempt
-     // so it would normally make sense to do a compaction
-     // to reclaim as much space as possible.
-     if (CMSCompactWhenClearAllSoftRefs) {
-       // Default: The rationale is that in this case either
-       // we are past the final marking phase, in which case
-       // we'd have to start over, or so little has been done
-       // that there's little point in saving that work. Compaction
-       // appears to be the sensible choice in either case.
-       *should_compact = true;
-     } else {
-       // We have been asked to clear all soft refs, but not to
-       // compact.  Make sure that we aren't past the final checkpoint
-       // phase, for that is where we process soft refs.  If we are already
-       // past that phase, we'll need to redo the refs discovery phase and
-       // if necessary clear soft refs that weren't previously
-       // cleared. We do so by remembering the phase in which
-       // we came in, and if we are past the refs processing
-       // phase, we'll choose to just redo the mark-sweep
-       // collection from scratch.
-       if (_collectorState > FinalMarking) {
-         // We are past the refs processing phase;
-         // start over and do a fresh synchronous CMS cycle
-         _collectorState = Resetting; // skip to reset to start new cycle
-         reset(false /* == !asynch */);
-         *should_start_over = true;
-       } // else we can continue a possibly ongoing current cycle
-     }
-   }
- }
- 
  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
--- 1670,1679 ----
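Review note: for reference, the compaction predicate that the deleted decide_foreground_collection_type() computed, reduced to a standalone sketch. The flag names are copied from the deleted code; the default values shown are my recollection of the product defaults, so treat them as assumptions. With those defaults the predicate is always true, which is why the unconditional set_did_compact(true) above should lose no behavior:

    #include <cstdio>

    // Stand-ins for the HotSpot flags consulted by the deleted code
    // (assumed defaults: compact at every full collection).
    static const bool     UseCMSCompactAtFullCollection = true;
    static const unsigned CMSFullGCsBeforeCompaction    = 0;

    bool old_should_compact(unsigned full_gcs_since_conc_gc,
                            bool user_requested_gc,
                            bool incremental_collection_will_fail) {
      return UseCMSCompactAtFullCollection &&
             ((full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
              user_requested_gc ||
              incremental_collection_will_fail);
    }

    int main() {
      // Even the weakest inputs yield "compact" under the assumed defaults.
      std::printf("compact? %d\n", old_should_compact(0, false, false));
      return 0;
    }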
*** 1785,1798 ****
    SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
    gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  
    GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
  
-   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
-     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
-       "collections passed to foreground collector", _full_gcs_since_conc_gc);
-   }
  
    // Temporarily widen the span of the weak reference processing to
    // the entire heap.
    MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
    ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
--- 1682,1691 ----
*** 1850,1860 ****
           "There should be at most 2 free chunks after compaction");
  #endif // ASSERT
    _collectorState = Resetting;
    assert(_restart_addr == NULL,
           "Should have been NULL'd before baton was passed");
!   reset(false /* == !asynch */);
    _cmsGen->reset_after_compaction();
    _concurrent_cycles_since_last_unload = 0;
  
    // Clear any data recorded in the PLAB chunk arrays.
    if (_survivor_plab_array != NULL) {
--- 1743,1753 ----
           "There should be at most 2 free chunks after compaction");
  #endif // ASSERT
    _collectorState = Resetting;
    assert(_restart_addr == NULL,
           "Should have been NULL'd before baton was passed");
!   reset(false /* == !concurrent */);
    _cmsGen->reset_after_compaction();
    _concurrent_cycles_since_last_unload = 0;
  
    // Clear any data recorded in the PLAB chunk arrays.
    if (_survivor_plab_array != NULL) {
*** 1873,1916 ****
    // For a mark-sweep-compact, compute_new_size() will be called
    // in the heap's do_collection() method.
  }
  
- // A work method used by the foreground collector to do
- // a mark-sweep, after taking over from a possibly on-going
- // concurrent mark-sweep collection.
- void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-   CollectorState first_state, bool should_start_over) {
-   if (PrintGC && Verbose) {
-     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-       "collector with count %d",
-       _full_gcs_since_conc_gc);
-   }
-   switch (_collectorState) {
-     case Idling:
-       if (first_state == Idling || should_start_over) {
-         // The background GC was not active, or should
-         // restarted from scratch; start the cycle.
-         _collectorState = InitialMarking;
-       }
-       // If first_state was not Idling, then a background GC
-       // was in progress and has now finished.  No need to do it
-       // again.  Leave the state as Idling.
-       break;
-     case Precleaning:
-       // In the foreground case don't do the precleaning since
-       // it is not done concurrently and there is extra work
-       // required.
-       _collectorState = FinalMarking;
-   }
-   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
- 
-   // For a mark-sweep, compute_new_size() will be called
-   // in the heap's do_collection() method.
- }
- 
- 
  void CMSCollector::print_eden_and_survivor_chunk_arrays() {
    DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
    ContiguousSpace* eden_space = dng->eden();
    ContiguousSpace* from_space = dng->from();
    ContiguousSpace* to_space   = dng->to();
--- 1766,1775 ----
*** 1987,2003 ****
      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
      _c->_foregroundGCShouldWait = true;
    }
  };
  
! // There are separate collect_in_background and collect_in_foreground because of
! // the different locking requirements of the background collector and the
! // foreground collector.  There was originally an attempt to share
! // one "collect" method between the background collector and the foreground
! // collector but the if-then-else required made it cleaner to have
! // separate methods.
! void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "A CMS asynchronous collection is only allowed on a CMS thread.");
  
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    {
--- 1846,1856 ----
      MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
      _c->_foregroundGCShouldWait = true;
    }
  };
  
! void CMSCollector::collect_in_background(GCCause::Cause cause) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "A CMS asynchronous collection is only allowed on a CMS thread.");
  
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    {
*** 2034,2044 ****
    }
  
    // Used for PrintGC
    size_t prev_used;
    if (PrintGC && Verbose) {
!     prev_used = _cmsGen->used(); // XXXPERM
    }
  
    // The change of the collection state is normally done at this level;
    // the exceptions are phases that are executed while the world is
    // stopped.  For those phases the change of state is done while the
--- 1887,1897 ----
    }
  
    // Used for PrintGC
    size_t prev_used;
    if (PrintGC && Verbose) {
!     prev_used = _cmsGen->used();
    }
  
    // The change of the collection state is normally done at this level;
    // the exceptions are phases that are executed while the world is
    // stopped.  For those phases the change of state is done while the
*** 2114,2124 ****
        // since the background collector may have yielded to the
        // foreground collector.
        break;
      case Marking:
        // initial marking in checkpointRootsInitialWork has been completed
!       if (markFromRoots(true)) { // we were successful
          assert(_collectorState == Precleaning, "Collector state should "
            "have changed");
        } else {
          assert(_foregroundGCIsActive, "Internal state inconsistency");
        }
--- 1967,1977 ----
        // since the background collector may have yielded to the
        // foreground collector.
        break;
      case Marking:
        // initial marking in checkpointRootsInitialWork has been completed
!       if (markFromRoots()) { // we were successful
          assert(_collectorState == Precleaning, "Collector state should "
            "have changed");
        } else {
          assert(_foregroundGCIsActive, "Internal state inconsistency");
        }
*** 2144,2157 ****
        }
        assert(_foregroundGCShouldWait, "block post-condition");
        break;
      case Sweeping:
        // final marking in checkpointRootsFinal has been completed
!       sweep(true);
        assert(_collectorState == Resizing, "Collector state change "
          "to Resizing must be done under the free_list_lock");
-       _full_gcs_since_conc_gc = 0;
  
      case Resizing: {
        // Sweeping has been completed...
        // At this point the background collection has completed.
        // Don't move the call to compute_new_size() down
--- 1997,2009 ----
        }
        assert(_foregroundGCShouldWait, "block post-condition");
        break;
      case Sweeping:
        // final marking in checkpointRootsFinal has been completed
!       sweep();
        assert(_collectorState == Resizing, "Collector state change "
          "to Resizing must be done under the free_list_lock");
  
      case Resizing: {
        // Sweeping has been completed...
        // At this point the background collection has completed.
        // Don't move the call to compute_new_size() down
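Review note: the Sweeping case above still deliberately falls through into Resizing. A compilable sketch, not from the patch (phase names copied from CMSCollector's CollectorState enum; the transitions are paraphrased from the surrounding switch), of the order in which the background loop now walks the phases:

    #include <cstdio>

    enum CollectorState { Idling, InitialMarking, Marking, Precleaning,
                          AbortablePreclean, FinalMarking, Sweeping,
                          Resizing, Resetting };

    int main() {
      CollectorState s = InitialMarking;
      while (s != Idling) {
        std::printf("phase %d\n", (int)s);
        switch (s) {
          case InitialMarking:    s = Marking;           break; // STW initial mark
          case Marking:           s = Precleaning;       break; // concurrent mark
          case Precleaning:       s = AbortablePreclean; break; // concurrent preclean
          case AbortablePreclean: s = FinalMarking;      break; // may be cut short
          case FinalMarking:      s = Sweeping;          break; // STW remark
          case Sweeping:          s = Resizing;          break; // concurrent sweep
          case Resizing:          s = Resetting;         break;
          default:                s = Idling;            break; // Resetting -> Idling
        }
      }
      return 0;
    }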
*** 2220,2235 ****
    if (PrintGC && Verbose) {
      _cmsGen->print_heap_change(prev_used);
    }
  }
  
- void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
-   if (!_cms_start_registered) {
-     register_gc_start(cause);
-   }
- }
- 
  void CMSCollector::register_gc_start(GCCause::Cause cause) {
    _cms_start_registered = true;
    _gc_timer_cm->register_gc_start();
    _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
  }
--- 2072,2081 ----
*** 2253,2376 ****
  void CMSCollector::report_heap_summary(GCWhen::Type when) {
    _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
    _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
  }
  
- void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
-   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-          "Foreground collector should be waiting, not executing");
-   assert(Thread::current()->is_VM_thread(), "A foreground collection"
-     "may only be done by the VM Thread with the world stopped");
-   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-          "VM thread should have CMS token");
- 
-   // The gc id is created in register_foreground_gc_start if this collection is synchronous
-   const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
-   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-     true, NULL, gc_id);)
-   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
- 
-   HandleMark hm;  // Discard invalid handles created during verification
- 
-   if (VerifyBeforeGC &&
-       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-     Universe::verify();
-   }
- 
-   // Snapshot the soft reference policy to be used in this collection cycle.
-   ref_processor()->setup_policy(clear_all_soft_refs);
- 
-   // Decide if class unloading should be done
-   update_should_unload_classes();
- 
-   bool init_mark_was_synchronous = false; // until proven otherwise
-   while (_collectorState != Idling) {
-     if (TraceCMSState) {
-       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-         Thread::current(), _collectorState);
-     }
-     switch (_collectorState) {
-       case InitialMarking:
-         register_foreground_gc_start(cause);
-         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
-         checkpointRootsInitial(false);
-         assert(_collectorState == Marking, "Collector state should have changed"
-           " within checkpointRootsInitial()");
-         break;
-       case Marking:
-         // initial marking in checkpointRootsInitialWork has been completed
-         if (VerifyDuringGC &&
-             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-           Universe::verify("Verify before initial mark: ");
-         }
-         {
-           bool res = markFromRoots(false);
-           assert(res && _collectorState == FinalMarking, "Collector state should "
-             "have changed");
-           break;
-         }
-       case FinalMarking:
-         if (VerifyDuringGC &&
-             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-           Universe::verify("Verify before re-mark: ");
-         }
-         checkpointRootsFinal(false, clear_all_soft_refs,
-                              init_mark_was_synchronous);
-         assert(_collectorState == Sweeping, "Collector state should not "
-           "have changed within checkpointRootsFinal()");
-         break;
-       case Sweeping:
-         // final marking in checkpointRootsFinal has been completed
-         if (VerifyDuringGC &&
-             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-           Universe::verify("Verify before sweep: ");
-         }
-         sweep(false);
-         assert(_collectorState == Resizing, "Incorrect state");
-         break;
-       case Resizing: {
-         // Sweeping has been completed; the actual resize in this case
-         // is done separately; nothing to be done in this state.
-         _collectorState = Resetting;
-         break;
-       }
-       case Resetting:
-         // The heap has been resized.
-         if (VerifyDuringGC &&
-             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-           Universe::verify("Verify before reset: ");
-         }
-         save_heap_summary();
-         reset(false);
-         assert(_collectorState == Idling, "Collector state should "
-           "have changed");
-         break;
-       case Precleaning:
-       case AbortablePreclean:
-         // Elide the preclean phase
-         _collectorState = FinalMarking;
-         break;
-       default:
-         ShouldNotReachHere();
-     }
-     if (TraceCMSState) {
-       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-         Thread::current(), _collectorState);
-     }
-   }
- 
-   if (VerifyAfterGC &&
-       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-     Universe::verify();
-   }
-   if (TraceCMSState) {
-     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-       " exiting collection CMS state %d",
-       Thread::current(), _collectorState);
-   }
- }
- 
  bool CMSCollector::waitForForegroundGC() {
    bool res = false;
    assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
           "CMS thread should have CMS token");
    // Block the foreground collector until the
--- 2099,2108 ----
*** 3343,3389 ****
  };
  
  // Checkpoint the roots into this generation from outside
  // this generation. [Note this initial checkpoint need only
  // be approximate -- we'll do a catch up phase subsequently.]
! void CMSCollector::checkpointRootsInitial(bool asynch) {
    assert(_collectorState == InitialMarking, "Wrong collector state");
    check_correct_thread_executing();
    TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  
    save_heap_summary();
    report_heap_summary(GCWhen::BeforeGC);
  
    ReferenceProcessor* rp = ref_processor();
    SpecializationStats::clear();
    assert(_restart_addr == NULL, "Control point invariant");
!   if (asynch) {
      // acquire locks for subsequent manipulations
      MutexLockerEx x(bitMapLock(),
                      Mutex::_no_safepoint_check_flag);
!     checkpointRootsInitialWork(asynch);
      // enable ("weak") refs discovery
      rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
      _collectorState = Marking;
-   } else {
-     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-     // which recognizes if we are a CMS generation, and doesn't try to turn on
-     // discovery; verify that they aren't meddling.
-     assert(!rp->discovery_is_atomic(),
-            "incorrect setting of discovery predicate");
-     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-            "ref discovery for this generation kind");
-     // already have locks
-     checkpointRootsInitialWork(asynch);
-     // now enable ("weak") refs discovery
-     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
-     _collectorState = Marking;
    }
    SpecializationStats::print();
  }
  
! void CMSCollector::checkpointRootsInitialWork(bool asynch) {
    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
    assert(_collectorState == InitialMarking, "just checking");
  
    // If there has not been a GC[n-1] since last GC[n] cycle completed,
    // precede our marking with a collection of all
--- 3075,3108 ----
  };
  
  // Checkpoint the roots into this generation from outside
  // this generation. [Note this initial checkpoint need only
  // be approximate -- we'll do a catch up phase subsequently.]
! void CMSCollector::checkpointRootsInitial() {
    assert(_collectorState == InitialMarking, "Wrong collector state");
    check_correct_thread_executing();
    TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  
    save_heap_summary();
    report_heap_summary(GCWhen::BeforeGC);
  
    ReferenceProcessor* rp = ref_processor();
    SpecializationStats::clear();
    assert(_restart_addr == NULL, "Control point invariant");
!   {
      // acquire locks for subsequent manipulations
      MutexLockerEx x(bitMapLock(),
                      Mutex::_no_safepoint_check_flag);
!     checkpointRootsInitialWork();
      // enable ("weak") refs discovery
      rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
      _collectorState = Marking;
    }
    SpecializationStats::print();
  }
  
! void CMSCollector::checkpointRootsInitialWork() {
    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
    assert(_collectorState == InitialMarking, "just checking");
  
    // If there has not been a GC[n-1] since last GC[n] cycle completed,
    // precede our marking with a collection of all
--- 3075,3108 ----
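Review note: with the synchronous branch gone, checkpointRootsInitial() keeps only the lock-scoped block. A generic sketch of the RAII locking pattern that block relies on, with std::lock_guard standing in for HotSpot's MutexLockerEx (an assumption for illustration only, not the HotSpot API):

    #include <mutex>

    std::mutex bit_map_lock;  // stand-in for the collector's bitMapLock()

    void checkpoint_roots_initial_sketch() {
      {
        // Equivalent in spirit to MutexLockerEx x(bitMapLock(), ...):
        // the lock is held for the scope of the block and released
        // automatically when x is destroyed.
        std::lock_guard<std::mutex> x(bit_map_lock);
        // ... initial-mark work, then enable ("weak") refs discovery ...
      }  // lock released here
    }

    int main() {
      checkpoint_roots_initial_sketch();
      return 0;
    }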
*** 3481,3532 ****
    // to be used to limit the extent of sweep in each generation.
    save_sweep_limits();
    verify_overflow_empty();
  }
  
! bool CMSCollector::markFromRoots(bool asynch) {
    // we might be tempted to assert that:
!   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
    //        "inconsistent argument?");
    // However that wouldn't be right, because it's possible that
    // a safepoint is indeed in progress as a younger generation
    // stop-the-world GC happens even as we mark in this generation.
    assert(_collectorState == Marking, "inconsistent state?");
    check_correct_thread_executing();
    verify_overflow_empty();
  
-   bool res;
-   if (asynch) {
      // Weak ref discovery note: We may be discovering weak
      // refs in this generation concurrent (but interleaved) with
      // weak ref discovery by a younger generation collector.
  
      CMSTokenSyncWithLocks ts(true, bitMapLock());
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
!     res = markFromRootsWork(asynch);
      if (res) {
        _collectorState = Precleaning;
      } else { // We failed and a foreground collection wants to take over
        assert(_foregroundGCIsActive, "internal state inconsistency");
        assert(_restart_addr == NULL,  "foreground will restart from scratch");
        if (PrintGCDetails) {
          gclog_or_tty->print_cr("bailing out to foreground collection");
        }
      }
-   } else {
-     assert(SafepointSynchronize::is_at_safepoint(),
-            "inconsistent with asynch == false");
-     // already have locks
-     res = markFromRootsWork(asynch);
-     _collectorState = FinalMarking;
-   }
    verify_overflow_empty();
    return res;
  }
  
! bool CMSCollector::markFromRootsWork(bool asynch) {
    // iterate over marked bits in bit map, doing a full scan and mark
    // from these roots using the following algorithm:
    // . if oop is to the right of the current scan pointer,
    //   mark corresponding bit (we'll process it later)
    // . else (oop is to left of current scan pointer)
--- 3200,3242 ----
    // to be used to limit the extent of sweep in each generation.
    save_sweep_limits();
    verify_overflow_empty();
  }
  
! bool CMSCollector::markFromRoots() {
    // we might be tempted to assert that:
!   // assert(!SafepointSynchronize::is_at_safepoint(),
    //        "inconsistent argument?");
    // However that wouldn't be right, because it's possible that
    // a safepoint is indeed in progress as a younger generation
    // stop-the-world GC happens even as we mark in this generation.
    assert(_collectorState == Marking, "inconsistent state?");
    check_correct_thread_executing();
    verify_overflow_empty();
  
    // Weak ref discovery note: We may be discovering weak
    // refs in this generation concurrent (but interleaved) with
    // weak ref discovery by a younger generation collector.
  
    CMSTokenSyncWithLocks ts(true, bitMapLock());
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
!   bool res = markFromRootsWork();
    if (res) {
      _collectorState = Precleaning;
    } else { // We failed and a foreground collection wants to take over
      assert(_foregroundGCIsActive, "internal state inconsistency");
      assert(_restart_addr == NULL,  "foreground will restart from scratch");
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("bailing out to foreground collection");
      }
    }
    verify_overflow_empty();
    return res;
  }
  
! bool CMSCollector::markFromRootsWork() {
    // iterate over marked bits in bit map, doing a full scan and mark
    // from these roots using the following algorithm:
    // . if oop is to the right of the current scan pointer,
    //   mark corresponding bit (we'll process it later)
    // . else (oop is to left of current scan pointer)
*** 3547,3559 ****
    verify_work_stacks_empty();
    verify_overflow_empty();
    bool result = false;
    if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
!     result = do_marking_mt(asynch);
    } else {
!     result = do_marking_st(asynch);
    }
    return result;
  }
  
  // Forward decl
--- 3257,3269 ----
    verify_work_stacks_empty();
    verify_overflow_empty();
    bool result = false;
    if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
!     result = do_marking_mt();
    } else {
!     result = do_marking_st();
    }
    return result;
  }
  
  // Forward decl
*** 3589,3599 ****
  // MT Concurrent Marking Task
  class CMSConcMarkingTask: public YieldingFlexibleGangTask {
    CMSCollector* _collector;
    int           _n_workers;       // requested/desired # workers
-   bool          _asynch;
    bool          _result;
    CompactibleFreeListSpace*  _cms_space;
    char          _pad_front[64];   // padding to ...
    HeapWord*     _global_finger;   // ... avoid sharing cache line
    char          _pad_back[64];
--- 3299,3308 ----
*** 3610,3626 ****
    CMSConcMarkingTerminatorTerminator _term_term;
  
   public:
    CMSConcMarkingTask(CMSCollector* collector,
                   CompactibleFreeListSpace* cms_space,
-                  bool asynch,
                   YieldingFlexibleWorkGang* workers,
                   OopTaskQueueSet* task_queues):
      YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
      _collector(collector),
      _cms_space(cms_space),
!     _asynch(asynch), _n_workers(0), _result(true),
      _task_queues(task_queues),
      _term(_n_workers, task_queues, _collector),
      _bit_map_lock(collector->bitMapLock())
    {
      _requested_size = _n_workers;
--- 3319,3334 ----
    CMSConcMarkingTerminatorTerminator _term_term;
  
   public:
    CMSConcMarkingTask(CMSCollector* collector,
                   CompactibleFreeListSpace* cms_space,
                   YieldingFlexibleWorkGang* workers,
                   OopTaskQueueSet* task_queues):
      YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
      _collector(collector),
      _cms_space(cms_space),
!     _n_workers(0), _result(true),
      _task_queues(task_queues),
      _term(_n_workers, task_queues, _collector),
      _bit_map_lock(collector->bitMapLock())
    {
      _requested_size = _n_workers;
*** 3643,3654 ****
    }
  
    void work(uint worker_id);
    bool should_yield() {
      return ConcurrentMarkSweepThread::should_yield()
!            && !_collector->foregroundGCIsActive()
!            && _asynch;
    }
  
    virtual void coordinator_yield();  // stuff done by coordinator
    bool result() { return _result; }
--- 3351,3361 ----
    }
  
    void work(uint worker_id);
    bool should_yield() {
      return ConcurrentMarkSweepThread::should_yield()
!            && !_collector->foregroundGCIsActive();
    }
  
    virtual void coordinator_yield();  // stuff done by coordinator
    bool result() { return _result; }
*** 3876,3887 ****
      // the last argument to the constructor indicates whether the
      // iteration should be incremental with periodic yields.
      Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                  &_collector->_markBitMap,
                                  work_queue(i),
!                                 &_collector->_markStack,
!                                 _asynch);
      _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
    } // else nothing to do for this task
  }   // else nothing to do for this task
}
  
  // We'd be tempted to assert here that since there are no
--- 3583,3593 ----
      // the last argument to the constructor indicates whether the
      // iteration should be incremental with periodic yields.
      Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                  &_collector->_markBitMap,
                                  work_queue(i),
!                                 &_collector->_markStack);
      _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
    } // else nothing to do for this task
  }   // else nothing to do for this task
}
  
  // We'd be tempted to assert here that since there are no
*** 4082,4092 ****
    ConcurrentMarkSweepThread::synchronize(true);
    _bit_map_lock->lock_without_safepoint_check();
    _collector->startTimer();
  }
  
! bool CMSCollector::do_marking_mt(bool asynch) {
    assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
    int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
                                          conc_workers()->total_workers(),
                                          conc_workers()->active_workers(),
                                          Threads::number_of_non_daemon_threads());
--- 3788,3798 ----
    ConcurrentMarkSweepThread::synchronize(true);
    _bit_map_lock->lock_without_safepoint_check();
    _collector->startTimer();
  }
  
! bool CMSCollector::do_marking_mt() {
    assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
    int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
                                          conc_workers()->total_workers(),
                                          conc_workers()->active_workers(),
                                          Threads::number_of_non_daemon_threads());
*** 4094,4104 ****
  
    CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
  
    CMSConcMarkingTask tsk(this,
                           cms_space,
-                          asynch,
                           conc_workers(),
                           task_queues());
  
    // Since the actual number of workers we get may be different
    // from the number we requested above, do we need to do anything different
--- 3800,3809 ----
*** 4123,4133 ****
    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
    // If _restart_addr is non-NULL, a marking stack overflow
    // occurred; we need to do a fresh marking iteration from the
    // indicated restart address.
!   if (_foregroundGCIsActive && asynch) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
--- 3828,3838 ----
    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
    // If _restart_addr is non-NULL, a marking stack overflow
    // occurred; we need to do a fresh marking iteration from the
    // indicated restart address.
!   if (_foregroundGCIsActive) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
*** 4152,4177 ****
    assert(tsk.completed(), "Inconsistency");
    assert(tsk.result() == true, "Inconsistency");
    return true;
  }
  
! bool CMSCollector::do_marking_st(bool asynch) {
    ResourceMark rm;
    HandleMark   hm;
  
    // Temporarily make refs discovery single threaded (non-MT)
    ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
    MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
!     &_markStack, CMSYield && asynch);
    // the last argument to iterate indicates whether the iteration
    // should be incremental with periodic yields.
    _markBitMap.iterate(&markFromRootsClosure);
    // If _restart_addr is non-NULL, a marking stack overflow
    // occurred; we need to do a fresh iteration from the
    // indicated restart address.
    while (_restart_addr != NULL) {
!     if (_foregroundGCIsActive && asynch) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
--- 3857,3882 ----
    assert(tsk.completed(), "Inconsistency");
    assert(tsk.result() == true, "Inconsistency");
    return true;
  }
  
! bool CMSCollector::do_marking_st() {
    ResourceMark rm;
    HandleMark   hm;
  
    // Temporarily make refs discovery single threaded (non-MT)
    ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
    MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
!     &_markStack, CMSYield);
    // the last argument to iterate indicates whether the iteration
    // should be incremental with periodic yields.
    _markBitMap.iterate(&markFromRootsClosure);
    // If _restart_addr is non-NULL, a marking stack overflow
    // occurred; we need to do a fresh iteration from the
    // indicated restart address.
    while (_restart_addr != NULL) {
!     if (_foregroundGCIsActive) {
      // We may be running into repeated stack overflows, having
      // reached the limit of the stack size, while making very
      // slow forward progress. It may be best to bail out and
      // let the foreground collector do its job.
      // Clear _restart_addr, so that foreground GC
*** 4701,4712 ****
    verify_work_stacks_empty();
    verify_overflow_empty();
  }
  
! void CMSCollector::checkpointRootsFinal(bool asynch,
!   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
    assert(_collectorState == FinalMarking, "incorrect state transition?");
    check_correct_thread_executing();
    // world is stopped at this checkpoint
    assert(SafepointSynchronize::is_at_safepoint(),
           "world should be stopped");
--- 4406,4416 ----
    verify_work_stacks_empty();
    verify_overflow_empty();
  }
  
! void CMSCollector::checkpointRootsFinal() {
    assert(_collectorState == FinalMarking, "incorrect state transition?");
    check_correct_thread_executing();
    // world is stopped at this checkpoint
    assert(SafepointSynchronize::is_at_safepoint(),
           "world should be stopped");
*** 4719,4729 ****
    if (PrintGCDetails) {
      gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                          _young_gen->used() / K,
                          _young_gen->capacity() / K);
    }
!   if (asynch) {
      if (CMSScavengeBeforeRemark) {
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        // Temporarily set flag to false, GCH->do_collection will
        // expect it to be false and set to true
        FlagSetting fl(gch->_is_gc_active, false);
--- 4423,4433 ----
    if (PrintGCDetails) {
      gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
                          _young_gen->used() / K,
                          _young_gen->capacity() / K);
    }
!   {
      if (CMSScavengeBeforeRemark) {
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        // Temporarily set flag to false, GCH->do_collection will
        // expect it to be false and set to true
        FlagSetting fl(gch->_is_gc_active, false);
*** 4740,4764 ****
        }
      }
      FreelistLocker x(this);
      MutexLockerEx y(bitMapLock(),
                      Mutex::_no_safepoint_check_flag);
!     assert(!init_mark_was_synchronous, "but that's impossible!");
!     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
!   } else {
!     // already have all the locks
!     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
!                              init_mark_was_synchronous);
!   }
    verify_work_stacks_empty();
    verify_overflow_empty();
    SpecializationStats::print();
  }
  
! void CMSCollector::checkpointRootsFinalWork(bool asynch,
!   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
! 
    NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
  
    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());
--- 4444,4461 ----
        }
      }
      FreelistLocker x(this);
      MutexLockerEx y(bitMapLock(),
                      Mutex::_no_safepoint_check_flag);
!     checkpointRootsFinalWork();
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
    SpecializationStats::print();
  }
  
! void CMSCollector::checkpointRootsFinalWork() {
    NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
  
    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());
*** 4771,4781 ****
      CodeCache::gc_prologue();
    }
    assert(haveFreelistLocks(), "must have free list locks");
    assert_lock_strong(bitMapLock());
  
-   if (!init_mark_was_synchronous) {
    // We might assume that we need not fill TLAB's when
    // CMSScavengeBeforeRemark is set, because we may have just done
    // a scavenge which would have filled all TLAB's -- and besides
    // Eden would be empty. This however may not always be the case --
    // for instance although we asked for a scavenge, it may not have
--- 4468,4477 ----
*** 4814,4834 ****
        GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
        do_remark_non_parallel();
      }
    }
-   } else {
-     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
-     // The initial mark was stop-world, so there's no rescanning to
-     // do; go straight on to the next step below.
-   }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    {
      NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
!     refProcessingWork(asynch, clear_all_soft_refs);
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    if (should_unload_classes()) {
--- 4510,4525 ----
        GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
        do_remark_non_parallel();
      }
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    {
      NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
!     refProcessingWork();
    }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    if (should_unload_classes()) {
*** 5870,5889 ****
    assert(workers != NULL, "Need parallel worker threads.");
    CMSRefEnqueueTaskProxy enq_task(task);
    workers->run_task(&enq_task);
  }
  
! void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
! 
    ResourceMark rm;
    HandleMark   hm;
  
    ReferenceProcessor* rp = ref_processor();
    assert(rp->span().equals(_span), "Spans should be equal");
    assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
    // Process weak references.
!   rp->setup_policy(clear_all_soft_refs);
    verify_work_stacks_empty();
  
    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                            &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
--- 5561,5579 ----
    assert(workers != NULL, "Need parallel worker threads.");
    CMSRefEnqueueTaskProxy enq_task(task);
    workers->run_task(&enq_task);
  }
  
! void CMSCollector::refProcessingWork() {
    ResourceMark rm;
    HandleMark   hm;
  
    ReferenceProcessor* rp = ref_processor();
    assert(rp->span().equals(_span), "Spans should be equal");
    assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
    // Process weak references.
!   rp->setup_policy(false);
    verify_work_stacks_empty();
  
    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                            &_markStack, false /* !preclean */);
    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
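Review note: rp->setup_policy(false) above encodes that a background cycle is never a last-ditch collection; only the compacting foreground path clears all soft references. A trivial sketch of what the boolean selects (the policy names here are descriptive stand-ins, not HotSpot class names):

    #include <cstdio>

    // true  maps to an always-clear policy (used by the compacting full GC);
    // false maps to the default heap-pressure heuristic.
    const char* soft_ref_policy_name(bool clear_all_soft_refs) {
      return clear_all_soft_refs ? "clear all soft references"
                                 : "clear soft references by heuristic only";
    }

    int main() {
      std::printf("background cycle:   %s\n", soft_ref_policy_name(false));
      std::printf("compacting full GC: %s\n", soft_ref_policy_name(true));
      return 0;
    }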
*** 6003,6013 ****
      }
    }
  }
  #endif
  
! void CMSCollector::sweep(bool asynch) {
    assert(_collectorState == Sweeping, "just checking");
    check_correct_thread_executing();
    verify_work_stacks_empty();
    verify_overflow_empty();
    increment_sweep_count();
--- 5693,5703 ----
      }
    }
  }
  #endif
  
! void CMSCollector::sweep() {
    assert(_collectorState == Sweeping, "just checking");
    check_correct_thread_executing();
    verify_work_stacks_empty();
    verify_overflow_empty();
    increment_sweep_count();
*** 6017,6034 ****
    _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
    assert(!_intra_sweep_timer.is_active(), "Should not be active");
    _intra_sweep_timer.reset();
    _intra_sweep_timer.start();
!   if (asynch) {
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
      // First sweep the old gen
      {
        CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                                 bitMapLock());
!       sweepWork(_cmsGen, asynch);
      }
  
      // Update Universe::_heap_*_at_gc figures.
      // We need all the free list locks to make the abstract state
      // transition from Sweeping to Resetting. See detailed note
--- 5707,5724 ----
    _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
    assert(!_intra_sweep_timer.is_active(), "Should not be active");
    _intra_sweep_timer.reset();
    _intra_sweep_timer.start();
!   {
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
      // First sweep the old gen
      {
        CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                                 bitMapLock());
!       sweepWork(_cmsGen);
      }
  
      // Update Universe::_heap_*_at_gc figures.
      // We need all the free list locks to make the abstract state
      // transition from Sweeping to Resetting. See detailed note
*** 6038,6054 ****
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();
      _collectorState = Resizing;
    }
-   } else {
-     // already have needed locks
-     sweepWork(_cmsGen, asynch);
-     // Update heap occupancy information which is used as
-     // input to soft ref clearing policy at the next gc.
-     Universe::update_heap_info_at_gc();
-     _collectorState = Resizing;
-   }
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    if (should_unload_classes()) {
--- 5728,5737 ----
*** 6139,6160 ****
    }
  }
  
  void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
    if (PrintGCDetails && Verbose) {
!     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
    }
-   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
-   _debug_collection_type =
-     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
-   if (PrintGCDetails && Verbose) {
-     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
-   }
- }
  
! void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
!   bool asynch) {
    // We iterate over the space(s) underlying this generation,
    // checking the mark bit map to see if the bits corresponding
    // to specific blocks are marked or not.  Blocks that are
    // marked are live and are not swept up.  All remaining blocks
    // are swept up, with coalescing on-the-fly as we sweep up
--- 5822,5841 ----
    }
  }
  
  void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
    if (PrintGCDetails && Verbose) {
!     if (_debug_concurrent_cycle) {
!       gclog_or_tty->print_cr("Rotate from concurrent to STW collections");
!     } else {
!       gclog_or_tty->print_cr("Rotate from STW to concurrent collections");
!     }
    }
+   _debug_concurrent_cycle = !_debug_concurrent_cycle;
  }
  
! void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
    // We iterate over the space(s) underlying this generation,
    // checking the mark bit map to see if the bits corresponding
    // to specific blocks are marked or not.  Blocks that are
    // marked are live and are not swept up.  All remaining blocks
    // are swept up, with coalescing on-the-fly as we sweep up
*** 6178,6190 ****
    // GC's while we do a sweeping step. For the same reason, we might
    // as well take the bit map lock for the entire duration
  
    // check that we hold the requisite locks
    assert(have_cms_token(), "Should hold cms token");
!   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
!          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
!         "Should possess CMS token to sweep");
    assert_lock_strong(gen->freelistLock());
    assert_lock_strong(bitMapLock());
  
    assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
    assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
--- 5859,5869 ----
    // GC's while we do a sweeping step. For the same reason, we might
    // as well take the bit map lock for the entire duration
  
    // check that we hold the requisite locks
    assert(have_cms_token(), "Should hold cms token");
!   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
    assert_lock_strong(gen->freelistLock());
    assert_lock_strong(bitMapLock());
  
    assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
    assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
*** 6192,6203 ****
                                _inter_sweep_estimate.padded_average(),
                                _intra_sweep_estimate.padded_average());
    gen->setNearLargestChunk();
    {
!     SweepClosure sweepClosure(this, gen, &_markBitMap,
!       CMSYield && asynch);
      gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
      // We need to free-up/coalesce garbage/blocks from a
      // co-terminal free run. This is done in the SweepClosure
      // destructor; so, do not remove this scope, else the
      // end-of-sweep-census below will be off by a little bit.
--- 5871,5881 ----
                                _inter_sweep_estimate.padded_average(),
                                _intra_sweep_estimate.padded_average());
    gen->setNearLargestChunk();
    {
!     SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
      gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
      // We need to free-up/coalesce garbage/blocks from a
      // co-terminal free run. This is done in the SweepClosure
      // destructor; so, do not remove this scope, else the
      // end-of-sweep-census below will be off by a little bit.
*** 6211,6222 ****
    }
  }
  
  // Reset CMS data structures (for now just the marking bit map)
  // preparatory for the next cycle.
! void CMSCollector::reset(bool asynch) {
!   if (asynch) {
      CMSTokenSyncWithLocks ts(true, bitMapLock());
  
      // If the state is not "Resetting", the foreground thread
      // has done a collection and the resetting.
      if (_collectorState != Resetting) {
--- 5889,5900 ----
    }
  }
  
  // Reset CMS data structures (for now just the marking bit map)
  // preparatory for the next cycle.
! void CMSCollector::reset(bool concurrent) {
!   if (concurrent) {
      CMSTokenSyncWithLocks ts(true, bitMapLock());
  
      // If the state is not "Resetting", the foreground thread
      // has done a collection and the resetting.
      if (_collectorState != Resetting) {
*** 6291,6311 ****
    TraceCollectorStats tcs(counters());
  
    switch (op) {
      case CMS_op_checkpointRootsInitial: {
        SvcGCMarker sgcm(SvcGCMarker::OTHER);
!       checkpointRootsInitial(true);       // asynch
        if (PrintGC) {
          _cmsGen->printOccupancy("initial-mark");
        }
        break;
      }
      case CMS_op_checkpointRootsFinal: {
        SvcGCMarker sgcm(SvcGCMarker::OTHER);
!       checkpointRootsFinal(true,    // asynch
!                            false,   // !clear_all_soft_refs
!                            false);  // !init_mark_was_synchronous
        if (PrintGC) {
          _cmsGen->printOccupancy("remark");
        }
        break;
      }
--- 5969,5987 ----
    TraceCollectorStats tcs(counters());
  
    switch (op) {
      case CMS_op_checkpointRootsInitial: {
        SvcGCMarker sgcm(SvcGCMarker::OTHER);
!       checkpointRootsInitial();
        if (PrintGC) {
          _cmsGen->printOccupancy("initial-mark");
        }
        break;
      }
      case CMS_op_checkpointRootsFinal: {
        SvcGCMarker sgcm(SvcGCMarker::OTHER);
!       checkpointRootsFinal();
        if (PrintGC) {
          _cmsGen->printOccupancy("remark");
        }
        break;
      }
*** 7191,7210 ****
  Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                         CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
!                        CMSMarkStack*  overflow_stack,
!                        bool should_yield):
    _collector(collector),
    _whole_span(collector->_span),
    _span(span),
    _bit_map(bit_map),
    _mut(&collector->_modUnionTable),
    _work_queue(work_queue),
    _overflow_stack(overflow_stack),
-   _yield(should_yield),
    _skip_bits(0),
    _task(task)
  {
    assert(_work_queue->size() == 0, "work_queue should be empty");
    _finger = span.start();
--- 6867,6884 ----
  Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                         CMSCollector* collector, MemRegion span,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue,
!                        CMSMarkStack*  overflow_stack):
    _collector(collector),
    _whole_span(collector->_span),
    _span(span),
    _bit_map(bit_map),
    _mut(&collector->_modUnionTable),
    _work_queue(work_queue),
    _overflow_stack(overflow_stack),
    _skip_bits(0),
    _task(task)
  {
    assert(_work_queue->size() == 0, "work_queue should be empty");
    _finger = span.start();