
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

*** 296,313 ****
    }
  }
  
  AdaptiveSizePolicy* CMSCollector::size_policy() {
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   return gch->gen_policy()->size_policy();
  }
  
  void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
  
    const char* gen_name = "old";
!   GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  
    // Generation Counters - generation 1, 1 subspace
    _gen_counters = new GenerationCounters(gen_name, 1, 1,
        gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
  
    _space_counters = new GSpaceCounters(gen_name, 0,
--- 296,313 ----
    }
  }
  
  AdaptiveSizePolicy* CMSCollector::size_policy() {
!   CMSHeap* heap = CMSHeap::heap();
!   return heap->gen_policy()->size_policy();
  }
  
  void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
  
    const char* gen_name = "old";
!   GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
  
    // Generation Counters - generation 1, 1 subspace
    _gen_counters = new GenerationCounters(gen_name, 1, 1,
        gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
  
    _space_counters = new GSpaceCounters(gen_name, 0,
*** 352,363 ****
  // If promotion failure handling is on use
  // the padded average size of the promotion for each
  // young generation collection.
  double CMSStats::time_until_cms_gen_full() const {
    size_t cms_free = _cms_gen->cmsSpace()->free();
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                     (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
    if (cms_free > expected_promotion) {
      // Start a cms collection if there isn't enough space to promote
      // for the next young collection. Use the padded average as
      // a safety factor.
--- 352,363 ----
  // If promotion failure handling is on use
  // the padded average size of the promotion for each
  // young generation collection.
  double CMSStats::time_until_cms_gen_full() const {
    size_t cms_free = _cms_gen->cmsSpace()->free();
!   CMSHeap* heap = CMSHeap::heap();
!   size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
                                     (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
    if (cms_free > expected_promotion) {
      // Start a cms collection if there isn't enough space to promote
      // for the next young collection. Use the padded average as
      // a safety factor.
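The hunk above touches CMS's occupancy heuristic: the collector estimates how much the next young collection will promote (the padded average, capped by the young generation's capacity) and compares that against the old generation's free space. The following is a minimal standalone sketch of that headroom test; OldGenStats and has_promotion_headroom are hypothetical names, and plain types stand in for the HotSpot classes (nothing here is HotSpot API):

#include <algorithm>
#include <cstddef>

// Hypothetical inputs mirroring the values the hunk reads.
struct OldGenStats {
  std::size_t cms_free;             // free bytes in the CMS (old) generation
  std::size_t young_capacity;       // capacity of the young generation
  std::size_t padded_avg_promoted;  // padded average bytes promoted per young GC
};

// True when the old generation still has room for the next young
// collection's expected promotion -- the cms_free > expected_promotion
// test in the hunk above.
bool has_promotion_headroom(const OldGenStats& s) {
  // The expected promotion cannot exceed the young gen's total capacity.
  std::size_t expected_promotion = std::min(s.young_capacity, s.padded_avg_promoted);
  return s.cms_free > expected_promotion;
}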
*** 593,608 ****
    assert(cmsThread()->collector() == this,
           "CMS Thread should refer to this gen");
    assert(CGC_lock != NULL, "Where's the CGC_lock?");
  
    // Support for parallelizing young gen rescan
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
!   _young_gen = (ParNewGeneration*)gch->young_gen();
!   if (gch->supports_inline_contig_alloc()) {
!     _top_addr = gch->top_addr();
!     _end_addr = gch->end_addr();
      assert(_young_gen != NULL, "no _young_gen");
      _eden_chunk_index = 0;
      _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
      _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    }
--- 593,608 ----
    assert(cmsThread()->collector() == this,
           "CMS Thread should refer to this gen");
    assert(CGC_lock != NULL, "Where's the CGC_lock?");
  
    // Support for parallelizing young gen rescan
!   CMSHeap* heap = CMSHeap::heap();
!   assert(heap->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
!   _young_gen = (ParNewGeneration*)heap->young_gen();
!   if (heap->supports_inline_contig_alloc()) {
!     _top_addr = heap->top_addr();
!     _end_addr = heap->end_addr();
      assert(_young_gen != NULL, "no _young_gen");
      _eden_chunk_index = 0;
      _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
      _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
    }
*** 760,772 ****
    log.trace("  Free fraction %f", free_percentage);
    log.trace("  Desired free fraction %f", desired_free_percentage);
    log.trace("  Maximum free fraction %f", maximum_free_percentage);
    log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
    log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
!   size_t young_size = gch->young_gen()->capacity();
    log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
    log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
    log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
    log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
  }
--- 760,772 ----
    log.trace("  Free fraction %f", free_percentage);
    log.trace("  Desired free fraction %f", desired_free_percentage);
    log.trace("  Maximum free fraction %f", maximum_free_percentage);
    log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
    log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
!   CMSHeap* heap = CMSHeap::heap();
!   assert(heap->is_old_gen(this), "The CMS generation should always be the old generation");
!   size_t young_size = heap->young_gen()->capacity();
    log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
    log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
    log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
    log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
  }
*** 921,931 ****
    // allocate, copy and if necessary update promoinfo --
    // delegate to underlying space.
    assert_lock_strong(freelistLock());
  
  #ifndef PRODUCT
!   if (GenCollectedHeap::heap()->promotion_should_fail()) {
      return NULL;
    }
  #endif  // #ifndef PRODUCT
  
    oop res = _cmsSpace->promote(obj, obj_size);
--- 921,931 ----
    // allocate, copy and if necessary update promoinfo --
    // delegate to underlying space.
    assert_lock_strong(freelistLock());
  
  #ifndef PRODUCT
!   if (CMSHeap::heap()->promotion_should_fail()) {
      return NULL;
    }
  #endif  // #ifndef PRODUCT
  
    oop res = _cmsSpace->promote(obj, obj_size);
*** 998,1008 ****
  
  oop ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                                 oop old, markOop m,
                                                 size_t word_sz) {
  #ifndef PRODUCT
!   if (GenCollectedHeap::heap()->promotion_should_fail()) {
      return NULL;
    }
  #endif  // #ifndef PRODUCT
  
    CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
--- 998,1008 ----
  
  oop ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                                 oop old, markOop m,
                                                 size_t word_sz) {
  #ifndef PRODUCT
!   if (CMSHeap::heap()->promotion_should_fail()) {
      return NULL;
    }
  #endif  // #ifndef PRODUCT
  
    CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
*** 1177,1190 ****
    }
  
    // We start a collection if we believe an incremental collection may fail;
    // this is not likely to be productive in practice because it's probably too
    // late anyway.
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   assert(gch->collector_policy()->is_generation_policy(),
           "You may want to check the correctness of the following");
!   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
      log.print("CMSCollector: collect because incremental collection will fail ");
      return true;
    }
  
    if (MetaspaceGC::should_concurrent_collect()) {
--- 1177,1190 ----
    }
  
    // We start a collection if we believe an incremental collection may fail;
    // this is not likely to be productive in practice because it's probably too
    // late anyway.
!   CMSHeap* heap = CMSHeap::heap();
!   assert(heap->collector_policy()->is_generation_policy(),
           "You may want to check the correctness of the following");
!   if (heap->incremental_collection_will_fail(true /* consult_young */)) {
      log.print("CMSCollector: collect because incremental collection will fail ");
      return true;
    }
  
    if (MetaspaceGC::should_concurrent_collect()) {
*** 1292,1303 ****
    }
    acquire_control_and_collect(full, clear_all_soft_refs);
  }
  
  void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   unsigned int gc_count = gch->total_full_collections();
    if (gc_count == full_gc_count) {
      MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
      _full_gc_requested = true;
      _full_gc_cause = cause;
      CGC_lock->notify();   // nudge CMS thread
--- 1292,1303 ----
    }
    acquire_control_and_collect(full, clear_all_soft_refs);
  }
  
  void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
!   CMSHeap* heap = CMSHeap::heap();
!   unsigned int gc_count = heap->total_full_collections();
    if (gc_count == full_gc_count) {
      MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
      _full_gc_requested = true;
      _full_gc_cause = cause;
      CGC_lock->notify();   // nudge CMS thread
*** 1305,1315 ****
      assert(gc_count > full_gc_count, "Error: causal loop");
    }
  }
  
  bool CMSCollector::is_external_interruption() {
!   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
    return GCCause::is_user_requested_gc(cause) ||
           GCCause::is_serviceability_requested_gc(cause);
  }
  
  void CMSCollector::report_concurrent_mode_interruption() {
--- 1305,1315 ----
      assert(gc_count > full_gc_count, "Error: causal loop");
    }
  }
  
  bool CMSCollector::is_external_interruption() {
!   GCCause::Cause cause = CMSHeap::heap()->gc_cause();
    return GCCause::is_user_requested_gc(cause) ||
           GCCause::is_serviceability_requested_gc(cause);
  }
  
  void CMSCollector::report_concurrent_mode_interruption() {
*** 1454,1465 ****
           p2i(Thread::current()), first_state);
    log_debug(gc, state)("    gets control with state %d", _collectorState);
  
    // Inform cms gen if this was due to partial collection failing.
    // The CMS gen may use this fact to determine its expansion policy.
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
      assert(!_cmsGen->incremental_collection_failed(),
             "Should have been noticed, reacted to and cleared");
      _cmsGen->set_incremental_collection_failed();
    }
  
--- 1454,1465 ----
           p2i(Thread::current()), first_state);
    log_debug(gc, state)("    gets control with state %d", _collectorState);
  
    // Inform cms gen if this was due to partial collection failing.
    // The CMS gen may use this fact to determine its expansion policy.
!   CMSHeap* heap = CMSHeap::heap();
!   if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
      assert(!_cmsGen->incremental_collection_failed(),
             "Should have been noticed, reacted to and cleared");
      _cmsGen->set_incremental_collection_failed();
    }
  
*** 1487,1504 ****
      do_compaction_work(clear_all_soft_refs);
  
      // Has the GC time limit been exceeded?
      size_t max_eden_size = _young_gen->max_eden_size();
!     GCCause::Cause gc_cause = gch->gc_cause();
      size_policy()->check_gc_overhead_limit(_young_gen->used(),
                                             _young_gen->eden()->used(),
                                             _cmsGen->max_capacity(),
                                             max_eden_size,
                                             full,
                                             gc_cause,
!                                            gch->collector_policy());
  
    // Reset the expansion cause, now that we just completed
    // a collection cycle.
    clear_expansion_cause();
  
    _foregroundGCIsActive = false;
--- 1487,1504 ----
      do_compaction_work(clear_all_soft_refs);
  
      // Has the GC time limit been exceeded?
      size_t max_eden_size = _young_gen->max_eden_size();
!     GCCause::Cause gc_cause = heap->gc_cause();
      size_policy()->check_gc_overhead_limit(_young_gen->used(),
                                             _young_gen->eden()->used(),
                                             _cmsGen->max_capacity(),
                                             max_eden_size,
                                             full,
                                             gc_cause,
!                                            heap->collector_policy());
  
    // Reset the expansion cause, now that we just completed
    // a collection cycle.
    clear_expansion_cause();
  
    _foregroundGCIsActive = false;
*** 1516,1540 ****
  }
  
  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
  
    STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
    gc_timer->register_gc_start();
  
    SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
!   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  
!   gch->pre_full_gc_dump(gc_timer);
  
    GCTraceTime(Trace, gc, phases) t("CMS:MSC");
  
    // Temporarily widen the span of the weak reference processing to
    // the entire heap.
!   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
    ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
    // Temporarily, clear the "is_alive_non_header" field of the
    // reference processor.
    ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
    // Temporarily make reference _processing_ single threaded (non-MT).
--- 1516,1540 ----
  }
  
  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
!   CMSHeap* heap = CMSHeap::heap();
  
    STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
    gc_timer->register_gc_start();
  
    SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
!   gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
  
!   heap->pre_full_gc_dump(gc_timer);
  
    GCTraceTime(Trace, gc, phases) t("CMS:MSC");
  
    // Temporarily widen the span of the weak reference processing to
    // the entire heap.
!   MemRegion new_span(CMSHeap::heap()->reserved_region());
    ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
    // Temporarily, clear the "is_alive_non_header" field of the
    // reference processor.
    ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
    // Temporarily make reference _processing_ single threaded (non-MT).
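All three "Temporarily ..." comments in this hunk rely on the same RAII idiom: a mutator object saves a field on construction, installs a temporary value, and restores the original on destruction, so the widened span and the cleared "is_alive_non_header" closure are undone however the compaction exits. The HotSpot mutator classes themselves are not shown here; the following is only a generic sketch of the shape they follow, with a hypothetical ScopedSetter name:

// Generic scoped setter: saves the old value on construction, installs a
// temporary one, and restores the original on destruction -- so the change
// is undone however the enclosing scope exits (including early returns).
template <typename T>
class ScopedSetter {
  T* _slot;  // location being temporarily overwritten
  T  _saved; // original value, restored in the destructor
public:
  ScopedSetter(T* slot, const T& tmp) : _slot(slot), _saved(*slot) {
    *_slot = tmp;
  }
  ~ScopedSetter() { *_slot = _saved; }
  // Non-copyable: exactly one restore per save.
  ScopedSetter(const ScopedSetter&) = delete;
  ScopedSetter& operator=(const ScopedSetter&) = delete;
};

int main() {
  int span = 10;
  {
    ScopedSetter<int> widen(&span, 100);
    // span == 100 for the duration of this block
  }
  // span == 10 again here
  return span == 10 ? 0 : 1;
}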
*** 1605,1615 ****
    _inter_sweep_timer.start();
  
    // No longer a need to do a concurrent collection for Metaspace.
    MetaspaceGC::set_should_concurrent_collect(false);
  
!   gch->post_full_gc_dump(gc_timer);
  
    gc_timer->register_gc_end();
  
    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  
--- 1605,1615 ----
    _inter_sweep_timer.start();
  
    // No longer a need to do a concurrent collection for Metaspace.
    MetaspaceGC::set_should_concurrent_collect(false);
  
!   heap->post_full_gc_dump(gc_timer);
  
    gc_timer->register_gc_end();
  
    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  
*** 1699,1709 ****
  void CMSCollector::collect_in_background(GCCause::Cause cause) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "A CMS asynchronous collection is only allowed on a CMS thread.");
  
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
    {
      bool safepoint_check = Mutex::_no_safepoint_check_flag;
      MutexLockerEx hl(Heap_lock, safepoint_check);
      FreelistLocker fll(this);
      MutexLockerEx x(CGC_lock, safepoint_check);
  
--- 1699,1709 ----
  void CMSCollector::collect_in_background(GCCause::Cause cause) {
    assert(Thread::current()->is_ConcurrentGC_thread(),
           "A CMS asynchronous collection is only allowed on a CMS thread.");
  
!   CMSHeap* heap = CMSHeap::heap();
    {
      bool safepoint_check = Mutex::_no_safepoint_check_flag;
      MutexLockerEx hl(Heap_lock, safepoint_check);
      FreelistLocker fll(this);
      MutexLockerEx x(CGC_lock, safepoint_check);
  
*** 1728,1739 ****
        // ensuing concurrent GC cycle.
        update_should_unload_classes();
        _full_gc_requested = false;           // acks all outstanding full gc requests
        _full_gc_cause = GCCause::_no_gc;
        // Signal that we are about to start a collection
!       gch->increment_total_full_collections();  // ... starting a collection cycle
!       _collection_count_start = gch->total_full_collections();
      }
  
      size_t prev_used = _cmsGen->used();
  
      // The change of the collection state is normally done at this level;
--- 1728,1739 ----
        // ensuing concurrent GC cycle.
        update_should_unload_classes();
        _full_gc_requested = false;           // acks all outstanding full gc requests
        _full_gc_cause = GCCause::_no_gc;
        // Signal that we are about to start a collection
!       heap->increment_total_full_collections();  // ... starting a collection cycle
!       _collection_count_start = heap->total_full_collections();
      }
  
      size_t prev_used = _cmsGen->used();
  
      // The change of the collection state is normally done at this level;
*** 1922,1934 ****
      _cms_start_registered = false;
    }
  }
  
  void CMSCollector::save_heap_summary() {
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   _last_heap_summary = gch->create_heap_summary();
!   _last_metaspace_summary = gch->create_metaspace_summary();
  }
  
  void CMSCollector::report_heap_summary(GCWhen::Type when) {
    _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
    _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
--- 1922,1934 ----
      _cms_start_registered = false;
    }
  }
  
  void CMSCollector::save_heap_summary() {
!   CMSHeap* heap = CMSHeap::heap();
!   _last_heap_summary = heap->create_heap_summary();
!   _last_metaspace_summary = heap->create_metaspace_summary();
  }
  
  void CMSCollector::report_heap_summary(GCWhen::Type when) {
    _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
    _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
*** 2299,2312 ****
    // Clear any marks from a previous round
    verification_mark_bm()->clear_all();
    assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
    verify_work_stacks_empty();
  
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
    // Update the saved marks which may affect the root scans.
!   gch->save_marks();
  
    if (CMSRemarkVerifyVariant == 1) {
      // In this first variant of verification, we complete
      // all marking, then check if the new marks-vector is
      // a subset of the CMS marks-vector.
--- 2299,2312 ----
    // Clear any marks from a previous round
    verification_mark_bm()->clear_all();
    assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
    verify_work_stacks_empty();
  
!   CMSHeap* heap = CMSHeap::heap();
!   heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
    // Update the saved marks which may affect the root scans.
!   heap->save_marks();
  
    if (CMSRemarkVerifyVariant == 1) {
      // In this first variant of verification, we complete
      // all marking, then check if the new marks-vector is
      // a subset of the CMS marks-vector.
*** 2325,2347 ****
  }
  
  void CMSCollector::verify_after_remark_work_1() {
    ResourceMark rm;
    HandleMark  hm;
!   CMSHeap* gch = CMSHeap::heap();
  
    // Get a clear set of claim bits for the roots processing to work with.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    // Mark from roots one level into CMS
    MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
!   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  
    {
      StrongRootsScope srs(1);
  
!     gch->cms_process_roots(&srs,
                             true,   // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             NULL);
--- 2325,2347 ----
  }
  
  void CMSCollector::verify_after_remark_work_1() {
    ResourceMark rm;
    HandleMark  hm;
!   CMSHeap* heap = CMSHeap::heap();
  
    // Get a clear set of claim bits for the roots processing to work with.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    // Mark from roots one level into CMS
    MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
!   heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  
    {
      StrongRootsScope srs(1);
  
!     heap->cms_process_roots(&srs,
                             true,   // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             NULL);
*** 2371,2381 ****
    verification_mark_bm()->iterate(&vcl);
    if (vcl.failed()) {
      Log(gc, verify) log;
      log.error("Failed marking verification after remark");
      ResourceMark rm;
!     gch->print_on(log.error_stream());
      fatal("CMS: failed marking verification after remark");
    }
  }
  
  class VerifyKlassOopsKlassClosure : public KlassClosure {
--- 2371,2381 ----
    verification_mark_bm()->iterate(&vcl);
    if (vcl.failed()) {
      Log(gc, verify) log;
      log.error("Failed marking verification after remark");
      ResourceMark rm;
!     heap->print_on(log.error_stream());
      fatal("CMS: failed marking verification after remark");
    }
  }
  
  class VerifyKlassOopsKlassClosure : public KlassClosure {
*** 2394,2419 ****
  };
  
  void CMSCollector::verify_after_remark_work_2() {
    ResourceMark rm;
    HandleMark  hm;
!   CMSHeap* gch = CMSHeap::heap();
  
    // Get a clear set of claim bits for the roots processing to work with.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    // Mark from roots one level into CMS
    MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                       markBitMap());
    CLDToOopClosure cld_closure(&notOlder, true);
  
!   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  
    {
      StrongRootsScope srs(1);
  
!     gch->cms_process_roots(&srs,
                             true,   // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             &cld_closure);
--- 2394,2419 ----
  };
  
  void CMSCollector::verify_after_remark_work_2() {
    ResourceMark rm;
    HandleMark  hm;
!   CMSHeap* heap = CMSHeap::heap();
  
    // Get a clear set of claim bits for the roots processing to work with.
    ClassLoaderDataGraph::clear_claimed_marks();
  
    // Mark from roots one level into CMS
    MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
                                       markBitMap());
    CLDToOopClosure cld_closure(&notOlder, true);
  
!   heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  
    {
      StrongRootsScope srs(1);
  
!     heap->cms_process_roots(&srs,
                             true,   // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             &cld_closure);
*** 2798,2808 ****
  // this generation. [Note this initial checkpoint need only
  // be approximate -- we'll do a catch up phase subsequently.]
  void CMSCollector::checkpointRootsInitial() {
    assert(_collectorState == InitialMarking, "Wrong collector state");
    check_correct_thread_executing();
!   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  
    save_heap_summary();
    report_heap_summary(GCWhen::BeforeGC);
  
    ReferenceProcessor* rp = ref_processor();
--- 2798,2808 ----
  // this generation. [Note this initial checkpoint need only
  // be approximate -- we'll do a catch up phase subsequently.]
  void CMSCollector::checkpointRootsInitial() {
    assert(_collectorState == InitialMarking, "Wrong collector state");
    check_correct_thread_executing();
!   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
  
    save_heap_summary();
    report_heap_summary(GCWhen::BeforeGC);
  
    ReferenceProcessor* rp = ref_processor();
*** 2839,2856 ****
    ResourceMark rm;
    HandleMark  hm;
  
    MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  
!   CMSHeap* gch = CMSHeap::heap();
  
    verify_work_stacks_empty();
    verify_overflow_empty();
  
!   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
    // Update the saved marks which may affect the root scans.
!   gch->save_marks();
  
    // weak reference processing has not started yet.
    ref_processor()->set_enqueuing_is_done(false);
  
    // Need to remember all newly created CLDs,
--- 2839,2856 ----
    ResourceMark rm;
    HandleMark  hm;
  
    MarkRefsIntoClosure notOlder(_span, &_markBitMap);
  
!   CMSHeap* heap = CMSHeap::heap();
  
    verify_work_stacks_empty();
    verify_overflow_empty();
  
!   heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
    // Update the saved marks which may affect the root scans.
!   heap->save_marks();
  
    // weak reference processing has not started yet.
    ref_processor()->set_enqueuing_is_done(false);
  
    // Need to remember all newly created CLDs,
*** 2867,2877 ****
  #if defined(COMPILER2) || INCLUDE_JVMCI
      DerivedPointerTableDeactivate dpt_deact;
  #endif
  
      if (CMSParallelInitialMarkEnabled) {
        // The parallel version.
!       WorkGang* workers = gch->workers();
        assert(workers != NULL, "Need parallel worker threads.");
        uint n_workers = workers->active_workers();
  
        StrongRootsScope srs(n_workers);
--- 2867,2877 ----
  #if defined(COMPILER2) || INCLUDE_JVMCI
      DerivedPointerTableDeactivate dpt_deact;
  #endif
  
      if (CMSParallelInitialMarkEnabled) {
        // The parallel version.
!       WorkGang* workers = heap->workers();
        assert(workers != NULL, "Need parallel worker threads.");
        uint n_workers = workers->active_workers();
  
        StrongRootsScope srs(n_workers);
*** 2886,2900 ****
        tsk.work(0);
      }
    } else {
      // The serial version.
      CLDToOopClosure cld_closure(&notOlder, true);
!     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  
      StrongRootsScope srs(1);
  
!     gch->cms_process_roots(&srs,
                             true,   // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             &cld_closure);
--- 2886,2900 ----
        tsk.work(0);
      }
    } else {
      // The serial version.
      CLDToOopClosure cld_closure(&notOlder, true);
!     heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
  
      StrongRootsScope srs(1);
  
!     heap->cms_process_roots(&srs,
                             true,   // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &notOlder,
                             &cld_closure);
*** 3795,3805 ****
      stopTimer();
      CMSTokenSyncWithLocks ts(true /* is cms thread */,
                               bitMapLock());
      startTimer();
      unsigned int before_count =
!       GenCollectedHeap::heap()->total_collections();
      SurvivorSpacePrecleanClosure
        sss_cl(this, _span, &_markBitMap, &_markStack,
               &pam_cl, before_count, CMSYield);
      _young_gen->from()->object_iterate_careful(&sss_cl);
      _young_gen->to()->object_iterate_careful(&sss_cl);
--- 3795,3805 ----
      stopTimer();
      CMSTokenSyncWithLocks ts(true /* is cms thread */,
                               bitMapLock());
      startTimer();
      unsigned int before_count =
!       CMSHeap::heap()->total_collections();
      SurvivorSpacePrecleanClosure
        sss_cl(this, _span, &_markBitMap, &_markStack,
               &pam_cl, before_count, CMSYield);
      _young_gen->from()->object_iterate_careful(&sss_cl);
      _young_gen->to()->object_iterate_careful(&sss_cl);
*** 4098,4122 ****
    assert(_collectorState == FinalMarking, "incorrect state transition?");
    check_correct_thread_executing();
    // world is stopped at this checkpoint
    assert(SafepointSynchronize::is_at_safepoint(),
           "world should be stopped");
!   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
                  _young_gen->used() / K, _young_gen->capacity() / K);
  
    {
      if (CMSScavengeBeforeRemark) {
!       GenCollectedHeap* gch = GenCollectedHeap::heap();
        // Temporarily set flag to false, GCH->do_collection will
        // expect it to be false and set to true
!       FlagSetting fl(gch->_is_gc_active, false);
!       gch->do_collection(true,                      // full (i.e. force, see below)
                           false,                     // !clear_all_soft_refs
                           0,                         // size
                           false,                     // is_tlab
                           GenCollectedHeap::YoungGen // type
                           );
--- 4098,4122 ----
    assert(_collectorState == FinalMarking, "incorrect state transition?");
    check_correct_thread_executing();
    // world is stopped at this checkpoint
    assert(SafepointSynchronize::is_at_safepoint(),
           "world should be stopped");
!   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
  
    verify_work_stacks_empty();
    verify_overflow_empty();
  
    log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
                  _young_gen->used() / K, _young_gen->capacity() / K);
  
    {
      if (CMSScavengeBeforeRemark) {
!       CMSHeap* heap = CMSHeap::heap();
        // Temporarily set flag to false, GCH->do_collection will
        // expect it to be false and set to true
!       FlagSetting fl(heap->_is_gc_active, false);
!       heap->do_collection(true,                      // full (i.e. force, see below)
                           false,                     // !clear_all_soft_refs
                           0,                         // size
                           false,                     // is_tlab
                           GenCollectedHeap::YoungGen // type
                           );
*** 4137,4147 ****
    assert_lock_strong(bitMapLock());
  
    ResourceMark rm;
    HandleMark  hm;
  
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
  
    if (should_unload_classes()) {
      CodeCache::gc_prologue();
    }
    assert(haveFreelistLocks(), "must have free list locks");
--- 4137,4147 ----
    assert_lock_strong(bitMapLock());
  
    ResourceMark rm;
    HandleMark  hm;
  
!   CMSHeap* heap = CMSHeap::heap();
  
    if (should_unload_classes()) {
      CodeCache::gc_prologue();
    }
    assert(haveFreelistLocks(), "must have free list locks");
*** 4157,4169 ****
    // the critical section releases and then do the remark following
    // the scavenge, and skip it here. In the absence of that policy,
    // or of an indication of whether the scavenge did indeed occur,
    // we cannot rely on TLAB's having been filled and must do
    // so here just in case a scavenge did not happen.
!   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
    // Update the saved marks which may affect the root scans.
!   gch->save_marks();
  
    print_eden_and_survivor_chunk_arrays();
  
    {
  #if defined(COMPILER2) || INCLUDE_JVMCI
--- 4157,4169 ----
    // the critical section releases and then do the remark following
    // the scavenge, and skip it here. In the absence of that policy,
    // or of an indication of whether the scavenge did indeed occur,
    // we cannot rely on TLAB's having been filled and must do
    // so here just in case a scavenge did not happen.
!   heap->ensure_parsability(false);  // fill TLAB's, but no need to retire them
    // Update the saved marks which may affect the root scans.
!   heap->save_marks();
  
    print_eden_and_survivor_chunk_arrays();
  
    {
  #if defined(COMPILER2) || INCLUDE_JVMCI
*** 4235,4245 ****
    }
    _markStack._hit_limit = 0;
    _markStack._failed_double = 0;
  
    if ((VerifyAfterGC || VerifyDuringGC) &&
!       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
      verify_after_remark();
    }
  
    _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
  
--- 4235,4245 ----
    }
    _markStack._hit_limit = 0;
    _markStack._failed_double = 0;
  
    if ((VerifyAfterGC || VerifyDuringGC) &&
!       CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
      verify_after_remark();
    }
  
    _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
  
*** 4257,4267 ****
    ResourceMark rm;
    HandleMark   hm;
  
    // ---------- scan from roots --------------
    _timer.start();
!   CMSHeap* gch = CMSHeap::heap();
    ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
  
    // ---------- young gen roots --------------
    {
      work_on_young_gen_roots(&par_mri_cl);
--- 4257,4267 ----
    ResourceMark rm;
    HandleMark   hm;
  
    // ---------- scan from roots --------------
    _timer.start();
!   CMSHeap* heap = CMSHeap::heap();
    ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
  
    // ---------- young gen roots --------------
    {
      work_on_young_gen_roots(&par_mri_cl);
*** 4273,4283 ****
    _timer.reset();
    _timer.start();
  
    CLDToOopClosure cld_closure(&par_mri_cl, true);
!   gch->cms_process_roots(_strong_roots_scope,
                           false,     // yg was scanned above
                           GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                           _collector->should_unload_classes(),
                           &par_mri_cl,
                           &cld_closure);
  
--- 4273,4283 ----
    _timer.reset();
    _timer.start();
  
    CLDToOopClosure cld_closure(&par_mri_cl, true);
!   heap->cms_process_roots(_strong_roots_scope,
                           false,     // yg was scanned above
                           GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                           _collector->should_unload_classes(),
                           &par_mri_cl,
                           &cld_closure);
  
*** 4382,4392 ****
    ResourceMark rm;
    HandleMark   hm;
  
    // ---------- rescan from roots --------------
    _timer.start();
!   CMSHeap* gch = CMSHeap::heap();
    ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
                                               _collector->_span, _collector->ref_processor(),
                                               &(_collector->_markBitMap),
                                               work_queue(worker_id));
  
--- 4382,4392 ----
    ResourceMark rm;
    HandleMark   hm;
  
    // ---------- rescan from roots --------------
    _timer.start();
!   CMSHeap* heap = CMSHeap::heap();
    ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
                                               _collector->_span, _collector->ref_processor(),
                                               &(_collector->_markBitMap),
                                               work_queue(worker_id));
  
*** 4402,4412 ****
    }
  
    // ---------- remaining roots --------------
    _timer.reset();
    _timer.start();
!   gch->cms_process_roots(_strong_roots_scope,
                           false,     // yg was scanned above
                           GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                           _collector->should_unload_classes(),
                           &par_mrias_cl, NULL);
    // The dirty klasses will be handled below
--- 4402,4412 ----
    }
  
    // ---------- remaining roots --------------
    _timer.reset();
    _timer.start();
!   heap->cms_process_roots(_strong_roots_scope,
                           false,     // yg was scanned above
                           GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                           _collector->should_unload_classes(),
                           &par_mrias_cl, NULL);
    // The dirty klasses will be handled below
*** 4834,4845 ****
    }
  }
  
  // Parallel version of remark
  void CMSCollector::do_remark_parallel() {
!   CMSHeap* gch = CMSHeap::heap();
!   WorkGang* workers = gch->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    // Choose to use the number of GC workers most recently set
    // into "active_workers".
    uint n_workers = workers->active_workers();
  
--- 4834,4845 ----
    }
  }
  
  // Parallel version of remark
  void CMSCollector::do_remark_parallel() {
!   CMSHeap* heap = CMSHeap::heap();
!   WorkGang* workers = heap->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    // Choose to use the number of GC workers most recently set
    // into "active_workers".
    uint n_workers = workers->active_workers();
  
*** 4851,4861 ****
    // We won't be iterating over the cards in the card table updating
    // the younger_gen cards, so we shouldn't call the following else
    // the verification code as well as subsequent younger_refs_iterate
    // code would get confused. XXX
!   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
  
    // The young gen rescan work will not be done as part of
    // process_roots (which currently doesn't know how to
    // parallelize such a scan), but rather will be broken up into
    // a set of parallel tasks (via the sampling that the [abortable]
  
--- 4851,4861 ----
    // We won't be iterating over the cards in the card table updating
    // the younger_gen cards, so we shouldn't call the following else
    // the verification code as well as subsequent younger_refs_iterate
    // code would get confused. XXX
!   // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
  
    // The young gen rescan work will not be done as part of
    // process_roots (which currently doesn't know how to
    // parallelize such a scan), but rather will be broken up into
    // a set of parallel tasks (via the sampling that the [abortable]
  
*** 4893,4903 ****
  }
  
  // Non-parallel version of remark
  void CMSCollector::do_remark_non_parallel() {
    ResourceMark rm;
    HandleMark   hm;
!   CMSHeap* gch = CMSHeap::heap();
    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
  
    MarkRefsIntoAndScanClosure
      mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
               &_markStack, this,
--- 4893,4903 ----
  }
  
  // Non-parallel version of remark
  void CMSCollector::do_remark_non_parallel() {
    ResourceMark rm;
    HandleMark   hm;
!   CMSHeap* heap = CMSHeap::heap();
    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
  
    MarkRefsIntoAndScanClosure
      mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
               &_markStack, this,
*** 4934,4956 ****
        verify_work_stacks_empty();
  
        log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
                      markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
  
    if (VerifyDuringGC &&
!       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify();
    }
    {
      GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
  
      verify_work_stacks_empty();
!     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
      StrongRootsScope srs(1);
  
!     gch->cms_process_roots(&srs,
                             true,  // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &mrias_cl, NULL); // The dirty klasses will be handled below
--- 4934,4956 ----
        verify_work_stacks_empty();
  
        log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
                      markFromDirtyCardsClosure.num_dirty_cards());
      }
    }
  
    if (VerifyDuringGC &&
!       CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify();
    }
    {
      GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
  
      verify_work_stacks_empty();
!     heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
      StrongRootsScope srs(1);
  
!     heap->cms_process_roots(&srs,
                             true,  // young gen as roots
                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
                             should_unload_classes(),
                             &mrias_cl, NULL); // The dirty klasses will be handled below
*** 5145,5156 ****
    log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
  }
  
  void CMSRefProcTaskExecutor::execute(ProcessTask& task) {
!   CMSHeap* gch = CMSHeap::heap();
!   WorkGang* workers = gch->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    CMSRefProcTaskProxy rp_task(task, &_collector,
                                _collector.ref_processor()->span(),
                                _collector.markBitMap(),
                                workers, _collector.task_queues());
  
--- 5145,5156 ----
    log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
  }
  
  void CMSRefProcTaskExecutor::execute(ProcessTask& task) {
!   CMSHeap* heap = CMSHeap::heap();
!   WorkGang* workers = heap->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    CMSRefProcTaskProxy rp_task(task, &_collector,
                                _collector.ref_processor()->span(),
                                _collector.markBitMap(),
                                workers, _collector.task_queues());
  
*** 5158,5169 ****
  }
  
  void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
  {
!   CMSHeap* gch = CMSHeap::heap();
!   WorkGang* workers = gch->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    CMSRefEnqueueTaskProxy enq_task(task);
    workers->run_task(&enq_task);
  }
  
--- 5158,5169 ----
  }
  
  void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
  {
!   CMSHeap* heap = CMSHeap::heap();
!   WorkGang* workers = heap->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    CMSRefEnqueueTaskProxy enq_task(task);
    workers->run_task(&enq_task);
  }
  
*** 5191,5203 ****
    // Set the degree of MT here. If the discovery is done MT, there
    // may have been a different number of threads doing the discovery
    // and a different number of discovered lists may have Ref objects.
    // That is OK as long as the Reference lists are balanced (see
    // balance_all_queues() and balance_queues()).
!   CMSHeap* gch = CMSHeap::heap();
    uint active_workers = ParallelGCThreads;
!   WorkGang* workers = gch->workers();
    if (workers != NULL) {
      active_workers = workers->active_workers();
      // The expectation is that active_workers will have already
      // been set to a reasonable value. If it has not been set,
      // investigate.
--- 5191,5203 ----
    // Set the degree of MT here. If the discovery is done MT, there
    // may have been a different number of threads doing the discovery
    // and a different number of discovered lists may have Ref objects.
    // That is OK as long as the Reference lists are balanced (see
    // balance_all_queues() and balance_queues()).
!   CMSHeap* heap = CMSHeap::heap();
    uint active_workers = ParallelGCThreads;
!   WorkGang* workers = heap->workers();
    if (workers != NULL) {
      active_workers = workers->active_workers();
      // The expectation is that active_workers will have already
      // been set to a reasonable value. If it has not been set,
      // investigate.
*** 5300,5310 ****
    assert(_collectorState == Sweeping, "just checking");
    check_correct_thread_executing();
    verify_work_stacks_empty();
    verify_overflow_empty();
    increment_sweep_count();
!   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  
    _inter_sweep_timer.stop();
    _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  
    assert(!_intra_sweep_timer.is_active(), "Should not be active");
--- 5300,5310 ----
    assert(_collectorState == Sweeping, "just checking");
    check_correct_thread_executing();
    verify_work_stacks_empty();
    verify_overflow_empty();
    increment_sweep_count();
!   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
  
    _inter_sweep_timer.stop();
    _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  
    assert(!_intra_sweep_timer.is_active(), "Should not be active");
*** 5373,5385 ****
    // the incremental_collection_failed flag,
    // thus inviting a younger gen collection to promote into
    // this generation. If such a promotion may still fail,
    // the flag will be set again when a young collection is
    // attempted.
!   GenCollectedHeap* gch = GenCollectedHeap::heap();
!   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
!   gch->update_full_collections_completed(_collection_count_start);
  }
  
  // FIX ME!!! Looks like this belongs in CFLSpace, with
  // CMSGen merely delegating to it.
  void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
--- 5373,5385 ----
    // the incremental_collection_failed flag,
    // thus inviting a younger gen collection to promote into
    // this generation. If such a promotion may still fail,
    // the flag will be set again when a young collection is
    // attempted.
!   CMSHeap* heap = CMSHeap::heap();
!   heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
!   heap->update_full_collections_completed(_collection_count_start);
  }
  
  // FIX ME!!! Looks like this belongs in CFLSpace, with
  // CMSGen merely delegating to it.
  void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
*** 5410,5420 ****
  void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
                                                      bool full) {
    // If the young generation has been collected, gather any statistics
    // that are of interest at this point.
!   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
    if (!full && current_is_young) {
      // Gather statistics on the young generation collection.
      collector()->stats().record_gc0_end(used());
    }
  }
  
--- 5410,5420 ----
  void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
                                                      bool full) {
    // If the young generation has been collected, gather any statistics
    // that are of interest at this point.
!   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
    if (!full && current_is_young) {
      // Gather statistics on the young generation collection.
      collector()->stats().record_gc0_end(used());
    }
  }
  
*** 6182,6192 ****
      new_oop->oop_iterate(_scanning_closure);
      // check if it's time to yield
      do_yield_check();
    }
    unsigned int after_count =
!     GenCollectedHeap::heap()->total_collections();
    bool abort = (_before_count != after_count) ||
                 _collector->should_abort_preclean();
    return abort ? 0 : size;
  }
  
--- 6182,6192 ----
      new_oop->oop_iterate(_scanning_closure);
      // check if it's time to yield
      do_yield_check();
    }
    unsigned int after_count =
!     CMSHeap::heap()->total_collections();
    bool abort = (_before_count != after_count) ||
                 _collector->should_abort_preclean();
    return abort ? 0 : size;
  }
  
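The final hunk shows how survivor-space precleaning detects that it went stale: the closure snapshots the heap's total_collections() before a yielding scan and aborts (returns 0) if the counter moved, since any intervening collection may have invalidated the objects being walked. A minimal sketch of that snapshot-and-compare pattern; the g_total_collections counter and scan_with_abort_check are hypothetical stand-ins, not HotSpot API:

#include <atomic>

// Hypothetical monotonically increasing collection counter; in the hunk
// above this role is played by the heap's total_collections().
std::atomic<unsigned int> g_total_collections{0};

// Yielding scan that reports 0 ("abort") if any collection ran meanwhile.
unsigned int scan_with_abort_check(unsigned int size) {
  unsigned int before_count = g_total_collections.load();
  // ... yielding work happens here; a GC may bump the counter ...
  unsigned int after_count = g_total_collections.load();
  bool abort = (before_count != after_count);  // a collection intervened
  return abort ? 0 : size;
}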