  // Sample collection interval time and reset for collection pause.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_begin();
  }

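  // The following stack-allocated mutators save the current settings of
  // the reference processor and restore them automatically when this
  // scope exits (RAII).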
  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily clear the "is_alive_non_header" field of the
  // reference processor.
  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
  // Temporarily make reference _processing_ single-threaded (non-MT).
  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
  // Temporarily make refs discovery atomic.
  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
  // Temporarily make reference _discovery_ single-threaded (non-MT).
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);

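  // Prepare the reference processor for this stop-the-world collection:
  // reset the enqueuing state, restart reference discovery from scratch,
  // and install a soft-reference clearing policy that honors
  // clear_all_soft_refs.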
  ref_processor()->set_enqueuing_is_done(false);
  ref_processor()->enable_discovery();
  ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear. If we are taking over the collection from an asynchronous
  // collection, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
         "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();

  // We must adjust the allocation statistics being maintained
  // in the free list space. We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
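  // (The inter-sweep timer measures the interval between sweeps; the
  // intra-sweep timer measures the duration of a sweep. Their padded
  // averages smooth the free list block demand estimates.)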
  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
  if (_inter_sweep_timer.is_active()) {
    _inter_sweep_timer.stop();
    // Note that we do not use this sample to update the _inter_sweep_estimate.
    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                            _inter_sweep_estimate.padded_average(),
                                            _intra_sweep_estimate.padded_average());
  }

}

// CMS work

// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch-up phase subsequently.]
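// (The catch-up is performed by the subsequent concurrent marking
// phase and the final remark pause.)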
void CMSCollector::checkpointRootsInitial(bool asynch) {
  assert(_collectorState == InitialMarking, "Wrong collector state");
  check_correct_thread_executing();
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());

  ReferenceProcessor* rp = ref_processor();
  SpecializationStats::clear();
  assert(_restart_addr == NULL, "Control point invariant");
  if (asynch) {
    // acquire locks for subsequent manipulations
    MutexLockerEx x(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    checkpointRootsInitialWork(asynch);
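    // Discovery was disabled during the pause, so no references should
    // have been recorded on the discovered lists.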
    rp->verify_no_references_recorded();
    rp->enable_discovery(); // enable ("weak") refs discovery
    _collectorState = Marking;
  } else {
    // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection,
    // which recognizes if we are a CMS generation and doesn't try to turn on
    // discovery; verify that they aren't meddling.
    assert(!rp->discovery_is_atomic(),
           "incorrect setting of discovery predicate");
    assert(!rp->discovery_enabled(), "GenCollectedHeap shouldn't control "
           "ref discovery for this generation kind");
    // already have locks
    checkpointRootsInitialWork(asynch);
    rp->enable_discovery(); // now enable ("weak") refs discovery
    _collectorState = Marking;
  }
  SpecializationStats::print();
}

void CMSCollector::checkpointRootsInitialWork(bool asynch) {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // If there has not been a GC[n-1] since the last GC[n] cycle completed,
  // precede our marking with a collection of all
  // younger generations to keep floating garbage to a minimum.
  // XXX: we won't do this for now -- it's an optimization to be done later.

  // already have locks
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Set up the verification and class unloading state for this
  // CMS collection cycle.