1976 // we came in, and if we are past the refs processing
1977 // phase, we'll choose to just redo the mark-sweep
1978 // collection from scratch.
1979 if (_collectorState > FinalMarking) {
1980 // We are past the refs processing phase;
1981 // start over and do a fresh synchronous CMS cycle
1982 _collectorState = Resetting; // skip to reset to start new cycle
1983 reset(false /* == !asynch */);
1984 *should_start_over = true;
1985 } // else we can continue a possibly ongoing current cycle
1986 }
1987 }
1988 }
1989
// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Register the start of this STW pause with the serial mark-sweep
  // timer/tracer pair (this foreground compaction is reported as a
  // serial old collection, not as a CMS cycle).
  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start(os::elapsed_counter());

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
      "collections passed to foreground collector", _full_gcs_since_conc_gc);
  }

  // Sample collection interval time and reset for collection pause.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_begin();
  }

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily, clear the "is_alive_non_header" field of the
  // NOTE(review): the middle of this function (including the actual
  // mark-sweep-compact invocation) is elided in this chunk -- verify
  // the following post-collection bookkeeping against the full file.
  reset(false /* == !asynch */);
  _cmsGen->reset_after_compaction();
  _concurrent_cycles_since_last_unload = 0;

  // Clear any data recorded in the PLAB chunk arrays.
  if (_survivor_plab_array != NULL) {
    reset_survivor_plab_arrays();
  }

  // Adjust the per-size allocation stats for the next epoch.
  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
  // Restart the "inter sweep timer" for the next epoch.
  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();

  // Sample collection pause time and reset for collection interval.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_end(gch->gc_cause());
  }

  gc_timer->register_gc_end(os::elapsed_counter());

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  // For a mark-sweep-compact, compute_new_size() will be called
  // in the heap's do_collection() method.
}
2099
// A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly on-going
// concurrent mark-sweep collection.
void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
  CollectorState first_state, bool should_start_over) {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
      "collector with count %d",
      _full_gcs_since_conc_gc);
  }
  // Resume the CMS state machine from wherever the background
  // collector left off when the foreground collector took over.
  switch (_collectorState) {
    case Idling:
      if (first_state == Idling || should_start_over) {
  // NOTE(review): the bulk of the state machine (and the tail of the
  // assert whose message appears below) is elided in this chunk --
  // verify the remaining cases against the full file.
        "Possible deadlock");
  }
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
      " exiting collection CMS state %d",
      Thread::current(), _collectorState);
  }
  if (PrintGC && Verbose) {
    // prev_used is captured in the elided portion above -- presumably
    // the generation's used() before the collection; confirm in full file.
    _cmsGen->print_heap_change(prev_used);
  }
}
2469
2470 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2471 if (!_cms_start_registered) {
2472 register_gc_start(cause);
2473 }
2474 }
2475
2476 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2477 _cms_start_registered = true;
2478 _gc_timer_cm->register_gc_start(os::elapsed_counter());
2479 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2480 }
2481
2482 void CMSCollector::register_gc_end() {
2483 if (_cms_start_registered) {
2484 report_heap_summary(GCWhen::AfterGC);
2485
2486 _gc_timer_cm->register_gc_end(os::elapsed_counter());
2487 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2488 _cms_start_registered = false;
2489 }
2490 }
2491
2492 void CMSCollector::save_heap_summary() {
2493 GenCollectedHeap* gch = GenCollectedHeap::heap();
2494 _last_heap_summary = gch->create_heap_summary();
2495 _last_metaspace_summary = gch->create_metaspace_summary();
2496 }
2497
// Report the most recently saved heap/metaspace summaries (see
// save_heap_summary()) to the CMS tracer for the given GC phase.
void CMSCollector::report_heap_summary(GCWhen::Type when) {
  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
}
2501
2502 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2503 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2504 "Foreground collector should be waiting, not executing");
2505 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2506 "may only be done by the VM Thread with the world stopped");
|
1976 // we came in, and if we are past the refs processing
1977 // phase, we'll choose to just redo the mark-sweep
1978 // collection from scratch.
1979 if (_collectorState > FinalMarking) {
1980 // We are past the refs processing phase;
1981 // start over and do a fresh synchronous CMS cycle
1982 _collectorState = Resetting; // skip to reset to start new cycle
1983 reset(false /* == !asynch */);
1984 *should_start_over = true;
1985 } // else we can continue a possibly ongoing current cycle
1986 }
1987 }
1988 }
1989
// A work method used by the foreground collector to do
// a mark-sweep-compact.
void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Register the start of this STW pause with the serial mark-sweep
  // timer/tracer pair (this foreground compaction is reported as a
  // serial old collection, not as a CMS cycle).
  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
      "collections passed to foreground collector", _full_gcs_since_conc_gc);
  }

  // Sample collection interval time and reset for collection pause.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_begin();
  }

  // Temporarily widen the span of the weak reference processing to
  // the entire heap.
  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
  // Temporarily, clear the "is_alive_non_header" field of the
  // NOTE(review): the middle of this function (including the actual
  // mark-sweep-compact invocation) is elided in this chunk -- verify
  // the following post-collection bookkeeping against the full file.
  reset(false /* == !asynch */);
  _cmsGen->reset_after_compaction();
  _concurrent_cycles_since_last_unload = 0;

  // Clear any data recorded in the PLAB chunk arrays.
  if (_survivor_plab_array != NULL) {
    reset_survivor_plab_arrays();
  }

  // Adjust the per-size allocation stats for the next epoch.
  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
  // Restart the "inter sweep timer" for the next epoch.
  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();

  // Sample collection pause time and reset for collection interval.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_end(gch->gc_cause());
  }

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  // For a mark-sweep-compact, compute_new_size() will be called
  // in the heap's do_collection() method.
}
2099
// A work method used by the foreground collector to do
// a mark-sweep, after taking over from a possibly on-going
// concurrent mark-sweep collection.
void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
  CollectorState first_state, bool should_start_over) {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
      "collector with count %d",
      _full_gcs_since_conc_gc);
  }
  // Resume the CMS state machine from wherever the background
  // collector left off when the foreground collector took over.
  switch (_collectorState) {
    case Idling:
      if (first_state == Idling || should_start_over) {
  // NOTE(review): the bulk of the state machine (and the tail of the
  // assert whose message appears below) is elided in this chunk --
  // verify the remaining cases against the full file.
        "Possible deadlock");
  }
  if (TraceCMSState) {
    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
      " exiting collection CMS state %d",
      Thread::current(), _collectorState);
  }
  if (PrintGC && Verbose) {
    // prev_used is captured in the elided portion above -- presumably
    // the generation's used() before the collection; confirm in full file.
    _cmsGen->print_heap_change(prev_used);
  }
}
2469
2470 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2471 if (!_cms_start_registered) {
2472 register_gc_start(cause);
2473 }
2474 }
2475
2476 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2477 _cms_start_registered = true;
2478 _gc_timer_cm->register_gc_start();
2479 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2480 }
2481
2482 void CMSCollector::register_gc_end() {
2483 if (_cms_start_registered) {
2484 report_heap_summary(GCWhen::AfterGC);
2485
2486 _gc_timer_cm->register_gc_end();
2487 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2488 _cms_start_registered = false;
2489 }
2490 }
2491
2492 void CMSCollector::save_heap_summary() {
2493 GenCollectedHeap* gch = GenCollectedHeap::heap();
2494 _last_heap_summary = gch->create_heap_summary();
2495 _last_metaspace_summary = gch->create_metaspace_summary();
2496 }
2497
// Report the most recently saved heap/metaspace summaries (see
// save_heap_summary()) to the CMS tracer for the given GC phase.
void CMSCollector::report_heap_summary(GCWhen::Type when) {
  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
}
2501
2502 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2503 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2504 "Foreground collector should be waiting, not executing");
2505 assert(Thread::current()->is_VM_thread(), "A foreground collection"
2506 "may only be done by the VM Thread with the world stopped");
|