      gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
  }

  // Inform the CMS gen if this was due to a partial collection failing.
  // The CMS gen may use this fact to determine its expansion policy.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    assert(!_cmsGen->incremental_collection_failed(),
           "Should have been noticed, reacted to and cleared");
    _cmsGen->set_incremental_collection_failed();
  }

  if (first_state > Idling) {
    report_concurrent_mode_interruption();
  }

  set_did_compact(true);

  // If the collection is being acquired from the background
  // collector, there may be references on the discovered
  // references lists.  Abandon those references, since some
  // of them may have become unreachable due to mutator
  // activity since they were discovered; the compacting
  // collector we are about to run will rediscover references
  // from scratch, and leaving stale entries on the lists
  // would require special handling.
  ref_processor()->disable_discovery();
  ref_processor()->abandon_partial_discovery();
  ref_processor()->verify_no_references_recorded();
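  // A simplified sketch of what abandoning discovery amounts to (the real
  // implementation lives in referenceProcessor.cpp; clear_discovered_list
  // below is a stand-in for its list-clearing helper): each per-type
  // discovered list is simply dropped, leaving the referents untouched.
  //
  //   void ReferenceProcessor::abandon_partial_discovery() {
  //     // Walk every discovered list (one per reference type per queue)
  //     // and sever it; nothing is enqueued, no referent is cleared.
  //     for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
  //       clear_discovered_list(_discovered_refs[i]);
  //     }
  //   }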

  if (first_state > Idling) {
    save_heap_summary();
  }

  do_compaction_work(clear_all_soft_refs);

  // Has the GC time limit been exceeded?
  size_t max_eden_size = _young_gen->max_capacity() -
                         _young_gen->to()->capacity() -
                         _young_gen->from()->capacity();
  GCCause::Cause gc_cause = gch->gc_cause();
  size_policy()->check_gc_overhead_limit(_young_gen->used(),
                                         _young_gen->eden()->used(),
                                         _cmsGen->max_capacity(),
                                         max_eden_size,
                                         full,
                                         gc_cause,
                                         gch->collector_policy());
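  // max_eden_size above is the young gen's maximum capacity with both
  // survivor spaces subtracted out, i.e. the largest eden can ever be.
  // With illustrative (not prescriptive) numbers: a 64M young gen with
  // two 8M survivor spaces yields a 48M maximum eden.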

// ...

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily clear the "is_alive_non_header" field of the
  // reference processor.
  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
  // Temporarily make reference _processing_ single threaded (non-MT).
  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
  // Temporarily make reference discovery atomic (non-concurrent).
  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
  // Temporarily make reference _discovery_ single threaded (non-MT).
  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
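  // Each ReferenceProcessor*Mutator above is a stack-allocated RAII helper:
  // its constructor saves the processor's current setting and installs the
  // new value, and its destructor restores the saved setting when this
  // scope is exited.  A sketch of the idea (simplified from the
  // declarations in referenceProcessor.hpp):
  //
  //   class ReferenceProcessorSpanMutator : StackObj {
  //     ReferenceProcessor* _rp;
  //     MemRegion           _saved_span;
  //    public:
  //     ReferenceProcessorSpanMutator(ReferenceProcessor* rp, MemRegion span)
  //       : _rp(rp), _saved_span(rp->span()) { rp->set_span(span); }
  //     ~ReferenceProcessorSpanMutator() { _rp->set_span(_saved_span); }
  //   };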

  ref_processor()->set_enqueuing_is_done(false);
  ref_processor()->enable_discovery();
  ref_processor()->setup_policy(clear_all_soft_refs);
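  // setup_policy(true) installs the always-clear soft reference policy;
  // setup_policy(false) keeps the default (LRU-based) clearing policy.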
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an asynchronous
  // collector, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
         "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();
  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
         "mod union for klasses should be clear if the baton was not passed");
  _ct->klass_rem_set()->clear_mod_union();
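  // (The mod union table accumulates cards dirtied while the concurrent
  // collector held the baton; a stop-the-world mark-sweep-compact rescans
  // the whole heap, so any accumulated entries are stale at this point.)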

  // We must adjust the allocation statistics being maintained
  // in the free list space.  We do so by reading and clearing
  // the sweep timer and updating the block flux rate estimates below.
  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
  if (_inter_sweep_timer.is_active()) {
    _inter_sweep_timer.stop();
    // Note that we do not use this sample to update the _inter_sweep_estimate.
    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                            _inter_sweep_estimate.padded_average(),
  // ...

// Checkpoint the roots into this generation from outside
// this generation. [Note this initial checkpoint need only
// be approximate -- we'll do a catch-up phase subsequently.]
void CMSCollector::checkpointRootsInitial() {
  assert(_collectorState == InitialMarking, "Wrong collector state");
  check_correct_thread_executing();
  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());

  save_heap_summary();
  report_heap_summary(GCWhen::BeforeGC);

  ReferenceProcessor* rp = ref_processor();
  SpecializationStats::clear();
  assert(_restart_addr == NULL, "Control point invariant");
  {
    // acquire locks for subsequent manipulations
    MutexLockerEx x(bitMapLock(),
                    Mutex::_no_safepoint_check_flag);
    checkpointRootsInitialWork();
    // enable ("weak") refs discovery
    rp->enable_discovery();
    _collectorState = Marking;
  }
  SpecializationStats::print();
}
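// Note: reference discovery remains enabled from the initial checkpoint
// through the concurrent marking phases; the discovered references are
// not processed until the final (remark) checkpoint.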

void CMSCollector::checkpointRootsInitialWork() {
  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
  assert(_collectorState == InitialMarking, "just checking");

  // If there has not been a GC[n-1] since the last GC[n] cycle completed,
  // precede our marking with a collection of all
  // younger generations to keep floating garbage to a minimum.
  // XXX: we won't do this for now -- it's an optimization to be done later.

  // already have locks
  assert_lock_strong(bitMapLock());
  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");

  // Set up the verification and class unloading state for this
  // CMS collection cycle.