# HG changeset patch # User rkennke # Date 1490714530 -7200 # Tue Mar 28 17:22:10 2017 +0200 # Node ID bc43e4dff6bb3c6b7c745ae4350e5398c505add5 # Parent 001500bca1ad581696f50188d32570f969770430 [backport] Implement early update references phase. diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp b/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" #include "runtime/interfaceSupport.hpp" @@ -161,7 +162,11 @@ } bool ShenandoahBarrierSet::need_update_refs_barrier() { - return _heap->concurrent_mark_in_progress() && _heap->need_update_refs(); + if (_heap->shenandoahPolicy()->update_refs()) { + return _heap->is_update_refs_in_progress(); + } else { + return _heap->concurrent_mark_in_progress() && _heap->need_update_refs(); + } } void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) { diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp b/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp @@ -39,6 +39,9 @@ size_t _bytes_reclaimed_this_cycle; protected: + bool _update_refs_early; + bool _update_refs_adaptive; + typedef struct { size_t region_number; size_t garbage; @@ -73,6 +76,9 @@ uint _cancelled_cm_cycles_in_a_row; uint _successful_cm_cycles_in_a_row; + uint _cancelled_uprefs_cycles_in_a_row; + uint _successful_uprefs_cycles_in_a_row; + size_t _bytes_in_cset; public: 
@@ -85,6 +91,22 @@ void record_bytes_start_CM(size_t bytes); void record_bytes_end_CM(size_t bytes); + virtual void record_cycle_start() { + // Do nothing + } + + virtual void record_cycle_end() { + // Do nothing + } + + virtual void record_phase_start(ShenandoahCollectorPolicy::TimingPhase phase) { + // Do nothing + } + + virtual void record_phase_end(ShenandoahCollectorPolicy::TimingPhase phase) { + // Do nothing + } + size_t bytes_in_cset() const { return _bytes_in_cset; } virtual void print_thresholds() { @@ -92,10 +114,22 @@ virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0; + virtual bool should_start_update_refs() { + return _update_refs_early; + } + + virtual bool update_refs() const { + return _update_refs_early; + } + virtual bool handover_cancelled_marking() { return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold; } + virtual bool handover_cancelled_uprefs() { + return _cancelled_uprefs_cycles_in_a_row <= ShenandoahFullGCThreshold; + } + virtual void record_cm_cancelled() { _cancelled_cm_cycles_in_a_row++; _successful_cm_cycles_in_a_row = 0; @@ -106,10 +140,23 @@ _successful_cm_cycles_in_a_row++; } + virtual void record_uprefs_cancelled() { + _cancelled_uprefs_cycles_in_a_row++; + _successful_uprefs_cycles_in_a_row = 0; + } + + virtual void record_uprefs_success() { + _cancelled_uprefs_cycles_in_a_row = 0; + _successful_uprefs_cycles_in_a_row++; + } + virtual void record_full_gc() { _bytes_in_cset = 0; } + virtual void record_peak_occupancy() { + } + virtual void start_choose_collection_set() { } virtual void end_choose_collection_set() { @@ -150,9 +197,25 @@ _bytes_in_cset(0), _cancelled_cm_cycles_in_a_row(0), _successful_cm_cycles_in_a_row(0), + _cancelled_uprefs_cycles_in_a_row(0), + _successful_uprefs_cycles_in_a_row(0), _region_garbage(NULL), - _region_garbage_size(0) + _region_garbage_size(0), + _update_refs_early(false), + _update_refs_adaptive(false) { + if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 || 
+ strcmp(ShenandoahUpdateRefsEarly, "true") == 0 ) { + _update_refs_early = true; + } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 || + strcmp(ShenandoahUpdateRefsEarly, "false") == 0 ) { + _update_refs_early = false; + } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) { + _update_refs_adaptive = true; + _update_refs_early = true; + } else { + vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly); + } } ShenandoahHeuristics::~ShenandoahHeuristics() { @@ -272,6 +335,7 @@ guarantee(phase == init_evac || phase == scan_roots || phase == update_roots || + phase == final_update_refs_roots || phase == _num_phases, "only in these phases we can add per-thread phase times"); if (phase != _num_phases) { @@ -285,13 +349,14 @@ void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) { _timing_data[phase]._start = os::elapsedTime(); - + _heuristics->record_phase_start(phase); } void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) { double end = os::elapsedTime(); double elapsed = end - _timing_data[phase]._start; _timing_data[phase]._secs.add(elapsed); + _heuristics->record_phase_end(phase); } void ShenandoahCollectorPolicy::report_concgc_cancelled() { @@ -388,8 +453,14 @@ size_t free_capacity = heap->free_regions()->capacity(); size_t free_used = heap->free_regions()->used(); assert(free_used <= free_capacity, "must use less than capacity"); - size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100); - size_t available = free_capacity - free_used + cset; + size_t available = free_capacity - free_used; + + if (! update_refs()) { + // Count in the memory available after cset reclamation. 
+ size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100); + available += cset; + } + uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold; size_t targetStartMarking = (capacity * threshold) / 100; @@ -417,11 +488,24 @@ private: uintx _free_threshold; TruncatedSeq* _cset_history; - + size_t _peak_occupancy; + double _last_cycle_end; + TruncatedSeq* _cycle_gap_history; + double _conc_mark_start; + TruncatedSeq* _conc_mark_duration_history; + double _conc_uprefs_start; + TruncatedSeq* _conc_uprefs_duration_history; public: AdaptiveHeuristics() : ShenandoahHeuristics(), _free_threshold(ShenandoahInitFreeThreshold), + _peak_occupancy(0), + _last_cycle_end(0), + _conc_mark_start(0), + _conc_mark_duration_history(new TruncatedSeq(5)), + _conc_uprefs_start(0), + _conc_uprefs_duration_history(new TruncatedSeq(5)), + _cycle_gap_history(new TruncatedSeq(5)), _cset_history(new TruncatedSeq((uint)ShenandoahHappyCyclesThreshold)) { _cset_history->add((double) ShenandoahCSetThreshold); @@ -437,22 +521,96 @@ return r->garbage() > threshold; } + void handle_cycle_success() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t capacity = heap->capacity(); + size_t available = capacity - _peak_occupancy; + size_t available_threshold = ShenandoahMinFreeThreshold * capacity / 100; + log_info(gc, ergo)("Capacity: " SIZE_FORMAT "M, Peak Occupancy: " SIZE_FORMAT + "M, Lowest Free: " SIZE_FORMAT "M, Free Threshold: " UINTX_FORMAT "M", + capacity / M, _peak_occupancy / M, available / M, available_threshold / M); + if (available <= available_threshold) { + pessimize_free_threshold(); + } else { + optimize_free_threshold(); + } + _peak_occupancy = 0; + } + + void record_cycle_end() { + ShenandoahHeuristics::record_cycle_end(); + handle_cycle_success(); + _last_cycle_end = os::elapsedTime(); + } + + void record_cycle_start() { + ShenandoahHeuristics::record_cycle_start(); + double last_cycle_gap = (os::elapsedTime() - _last_cycle_end); + 
_cycle_gap_history->add(last_cycle_gap); + } + + void record_phase_start(ShenandoahCollectorPolicy::TimingPhase phase) { + if (phase == ShenandoahCollectorPolicy::conc_mark) { + _conc_mark_start = os::elapsedTime(); + } else if (phase == ShenandoahCollectorPolicy::conc_update_refs) { + _conc_uprefs_start = os::elapsedTime(); + } // Else ignore + + } + + virtual void record_phase_end(ShenandoahCollectorPolicy::TimingPhase phase) { + if (phase == ShenandoahCollectorPolicy::conc_mark) { + _conc_mark_duration_history->add(os::elapsedTime() - _conc_mark_start); + } else if (phase == ShenandoahCollectorPolicy::conc_update_refs) { + _conc_uprefs_duration_history->add(os::elapsedTime() - _conc_uprefs_start); + } // Else ignore + } + + void optimize_free_threshold() { + if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold && + (! update_refs() || (_successful_uprefs_cycles_in_a_row > ShenandoahHappyCyclesThreshold)) && + _free_threshold > 0) { + _free_threshold--; + log_info(gc,ergo)("Reducing free threshold to: " UINTX_FORMAT "%% (" SIZE_FORMAT "M)", + _free_threshold, _free_threshold * ShenandoahHeap::heap()->capacity() / 100 / M); + _successful_cm_cycles_in_a_row = 0; + _successful_uprefs_cycles_in_a_row = 0; + } + } + + void pessimize_free_threshold() { + if (_free_threshold < ShenandoahMaxFreeThreshold) { + _free_threshold++; + log_info(gc,ergo)("Increasing free threshold to: " UINTX_FORMAT "%% (" SIZE_FORMAT "M)", + _free_threshold, _free_threshold * ShenandoahHeap::heap()->capacity() / 100 / M); + } + } + virtual void record_cm_cancelled() { ShenandoahHeuristics::record_cm_cancelled(); - if (_free_threshold < ShenandoahMaxFreeThreshold) { - _free_threshold++; - log_info(gc,ergo)("increasing free threshold to: "UINTX_FORMAT, _free_threshold); - } + pessimize_free_threshold(); } virtual void record_cm_success() { ShenandoahHeuristics::record_cm_success(); - if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold && - _free_threshold > 
ShenandoahMinFreeThreshold) { - _free_threshold--; - log_info(gc,ergo)("reducing free threshold to: "UINTX_FORMAT, _free_threshold); - _successful_cm_cycles_in_a_row = 0; - } + } + + virtual void record_uprefs_cancelled() { + ShenandoahHeuristics::record_uprefs_cancelled(); + pessimize_free_threshold(); + } + + virtual void record_uprefs_success() { + ShenandoahHeuristics::record_uprefs_success(); + } + + virtual void record_full_gc() { + ShenandoahHeuristics::record_full_gc(); + pessimize_free_threshold(); + } + + virtual void record_peak_occupancy() { + _peak_occupancy = MAX2(_peak_occupancy, ShenandoahHeap::heap()->used()); } virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const { @@ -462,36 +620,71 @@ size_t free_capacity = heap->free_regions()->capacity(); size_t free_used = heap->free_regions()->used(); assert(free_used <= free_capacity, "must use less than capacity"); - // size_t cset_threshold = (size_t) _cset_history->maximum(); - size_t cset_threshold = (size_t) _cset_history->davg(); - size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100); - size_t available = free_capacity - free_used + cset; - uintx factor = _free_threshold + cset_threshold; - size_t targetStartMarking = (capacity * factor) / 100; + size_t available = free_capacity - free_used; + uintx factor = _free_threshold; + size_t cset_threshold = 0; + if (! update_refs()) { + // Count in the memory available after cset reclamation. 
+ cset_threshold = (size_t) _cset_history->davg(); + size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100); + available += cset; + factor += cset_threshold; + } + size_t threshold_available = (capacity * factor) / 100; + size_t bytes_allocated = heap->bytes_allocated_since_cm(); size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100; - if (available < targetStartMarking && - heap->bytes_allocated_since_cm() > threshold_bytes_allocated) - { + + if (available < threshold_available && + bytes_allocated > threshold_bytes_allocated) { + log_info(gc,ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT + "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M", + available / M, threshold_available / M, bytes_allocated / M, threshold_bytes_allocated / M); // Need to check that an appropriate number of regions have // been allocated since last concurrent mark too. shouldStartConcurrentMark = true; } if (shouldStartConcurrentMark) { - log_info(gc,ergo)("predicted cset threshold: "SIZE_FORMAT, cset_threshold); - log_info(gc,ergo)("Starting concurrent mark at "SIZE_FORMAT"K CSet ("SIZE_FORMAT"%%)", _bytes_in_cset / K, _bytes_in_cset * 100 / capacity); - _cset_history->add((double) (_bytes_in_cset * 100 / capacity)); + if (! update_refs()) { + log_info(gc,ergo)("Predicted cset threshold: " SIZE_FORMAT ", " SIZE_FORMAT "K CSet ("SIZE_FORMAT"%%)", + cset_threshold, _bytes_in_cset / K, _bytes_in_cset * 100 / capacity); + _cset_history->add((double) (_bytes_in_cset * 100 / capacity)); + } + } + return shouldStartConcurrentMark; + } + + virtual bool should_start_update_refs() { + if (! 
_update_refs_adaptive) { + return _update_refs_early; + } + + double cycle_gap_avg = _cycle_gap_history->avg(); + double conc_mark_avg = _conc_mark_duration_history->avg(); + double conc_uprefs_avg = _conc_uprefs_duration_history->avg(); + + if (_update_refs_early) { + double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0; + if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) { + _update_refs_early = false; + } + } else { + double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0; + if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) { + _update_refs_early = true; + } + } + return _update_refs_early; + } }; ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() : _cycle_counter(0), _successful_cm(0), - _degenerated_cm(0) + _degenerated_cm(0), + _successful_uprefs(0), + _degenerated_uprefs(0) { ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size()); @@ -579,6 +772,26 @@ _phase_names[conc_mark] = "Concurrent Marking"; _phase_names[conc_evac] = "Concurrent Evacuation"; + _phase_names[init_update_refs_gross] = "Pause Init Update Refs (G)"; + _phase_names[init_update_refs] = "Pause Init Update Refs (N)"; + _phase_names[conc_update_refs] = "Concurrent Update Refs"; + _phase_names[final_update_refs_gross] = "Pause Final Update Refs (G)"; + _phase_names[final_update_refs] = "Pause Final Update Refs (N)"; + + _phase_names[final_update_refs_roots] = " Update Roots"; + _phase_names[final_update_refs_thread_roots] = " UR: Thread Roots"; + _phase_names[final_update_refs_code_roots] = " UR: Code Cache Roots"; + _phase_names[final_update_refs_string_table_roots] = " UR: String Table Roots"; + _phase_names[final_update_refs_universe_roots] = " UR: Universe Roots"; + _phase_names[final_update_refs_jni_roots] = " UR: JNI Roots"; + _phase_names[final_update_refs_jni_weak_roots] = " UR: JNI Weak Roots"; + _phase_names[final_update_refs_synchronizer_roots] = " UR: Synchronizer Roots"; + 
_phase_names[final_update_refs_flat_profiler_roots] = " UR: Flat Profiler Roots"; + _phase_names[final_update_refs_management_roots] = " UR: Management Roots"; + _phase_names[final_update_refs_system_dict_roots] = " UR: System Dict Roots"; + _phase_names[final_update_refs_cldg_roots] = " UR: CLDG Roots"; + _phase_names[final_update_refs_jvmti_roots] = " UR: JVMTI Roots"; + if (ShenandoahGCHeuristics != NULL) { if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { log_info(gc, init)("Shenandoah heuristics: aggressive"); @@ -666,6 +879,18 @@ return _heuristics->handover_cancelled_marking(); } +bool ShenandoahCollectorPolicy::handover_cancelled_uprefs() { + return _heuristics->handover_cancelled_uprefs(); +} + +bool ShenandoahCollectorPolicy::update_refs() { + return _heuristics->update_refs(); +} + +bool ShenandoahCollectorPolicy::should_start_update_refs() { + return _heuristics->should_start_update_refs(); +} + void ShenandoahCollectorPolicy::record_cm_success() { _heuristics->record_cm_success(); _successful_cm++; @@ -679,10 +904,27 @@ _heuristics->record_cm_cancelled(); } +void ShenandoahCollectorPolicy::record_uprefs_success() { + _heuristics->record_uprefs_success(); + _successful_uprefs++; +} + +void ShenandoahCollectorPolicy::record_uprefs_degenerated() { + _degenerated_uprefs++; +} + +void ShenandoahCollectorPolicy::record_uprefs_cancelled() { + _heuristics->record_uprefs_cancelled(); +} + void ShenandoahCollectorPolicy::record_full_gc() { _heuristics->record_full_gc(); } +void ShenandoahCollectorPolicy::record_peak_occupancy() { + _heuristics->record_peak_occupancy(); +} + void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) { _heuristics->choose_collection_set(collection_set, connections); } @@ -717,6 +959,7 @@ out->cr(); out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs); out->print_cr("" SIZE_FORMAT " 
successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm); + out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated update references ", _successful_uprefs, _degenerated_uprefs); out->cr(); } @@ -734,10 +977,6 @@ ); } -void ShenandoahCollectorPolicy::increase_cycle_counter() { - _cycle_counter++; -} - size_t ShenandoahCollectorPolicy::cycle_counter() const { return _cycle_counter; } @@ -911,6 +1150,15 @@ application_workers, 0, active_workers_by_liveset); } +void ShenandoahCollectorPolicy::record_cycle_start() { + _cycle_counter++; + _heuristics->record_cycle_start(); +} + +void ShenandoahCollectorPolicy::record_cycle_end() { + _heuristics->record_cycle_end(); +} + GCTimer* ShenandoahCollectorPolicy::conc_timer() {return _conc_timer;} GCTimer* ShenandoahCollectorPolicy::stw_timer() {return _stw_timer;} diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp @@ -105,8 +105,30 @@ evac_cldg_roots, evac_jvmti_roots, + init_update_refs_gross, + init_update_refs, + + final_update_refs_gross, + final_update_refs, + + // Per-thread timer block, should have "roots" counters in consistent order + final_update_refs_roots, + final_update_refs_thread_roots, + final_update_refs_code_roots, + final_update_refs_string_table_roots, + final_update_refs_universe_roots, + final_update_refs_jni_roots, + final_update_refs_jni_weak_roots, + final_update_refs_synchronizer_roots, + final_update_refs_flat_profiler_roots, + final_update_refs_management_roots, + final_update_refs_system_dict_roots, + final_update_refs_cldg_roots, + final_update_refs_jvmti_roots, + conc_mark, conc_evac, + conc_update_refs, reset_bitmaps, full_gc, @@ -139,6 +161,9 @@ size_t _degenerated_cm; size_t 
_successful_cm; + size_t _degenerated_uprefs; + size_t _successful_uprefs; + ShenandoahHeuristics* _heuristics; ShenandoahTracer* _tracer; STWGCTimer* _stw_timer; @@ -169,6 +194,11 @@ void post_heap_initialize(); + // TODO: This is different from gc_end: that one encompasses one VM operation. + // These two encompass the entire cycle. + void record_cycle_start(); + void record_cycle_end(); + void record_phase_start(TimingPhase phase); void record_phase_end(TimingPhase phase); @@ -185,12 +215,24 @@ void record_bytes_start_CM(size_t bytes); void record_bytes_end_CM(size_t bytes); bool should_start_concurrent_mark(size_t used, size_t capacity); + + // Returns true when there should be a separate concurrent reference + // updating phase after evacuation. + bool should_start_update_refs(); + bool update_refs(); + bool handover_cancelled_marking(); + bool handover_cancelled_uprefs(); void record_cm_cancelled(); void record_cm_success(); void record_cm_degenerated(); void record_full_gc(); + void record_uprefs_cancelled(); + void record_uprefs_success(); + void record_uprefs_degenerated(); + + void record_peak_occupancy(); void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections=NULL); void choose_free_set(ShenandoahFreeSet* free_set); @@ -207,7 +249,6 @@ void set_conc_gc_aborted() { _conc_gc_aborted = true;} void clear_conc_gc_aborted() {_conc_gc_aborted = false;} - void increase_cycle_counter(); size_t cycle_counter() const; diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp b/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentThread.cpp @@ -74,6 +74,9 @@ if (heap->is_evacuation_in_progress()) { heap->set_evacuation_in_progress_concurrently(false); } + if (heap->is_update_refs_in_progress()) { + heap->set_update_refs_in_progress(false); + } } 
else { Thread::current()->_ParkEvent->park(10); } @@ -101,7 +104,11 @@ gc_timer->register_gc_start(); gc_tracer->report_gc_start(GCCause::_no_cause_specified, gc_timer->gc_start()); - heap->shenandoahPolicy()->increase_cycle_counter(); + // Cycle started + heap->shenandoahPolicy()->record_cycle_start(); + + // Capture peak occupancy right after starting the cycle + heap->shenandoahPolicy()->record_peak_occupancy(); TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); TraceMemoryManagerStats tmms(false, GCCause::_no_cause_specified); @@ -133,6 +140,9 @@ ShenandoahHeap::heap()->concurrentMark()->mark_from_roots(); } + // Allocations happen during concurrent mark, record peak after the phase: + heap->shenandoahPolicy()->record_peak_occupancy(); + // Possibly hand over remaining marking work to final-mark phase. bool clear_full_gc = false; if (heap->cancelled_concgc()) { @@ -182,9 +192,60 @@ heap->do_evacuation(); } + // Allocations happen during evacuation, record peak after the phase: + heap->shenandoahPolicy()->record_peak_occupancy(); + + // Do an update-refs phase if required. 
+ if (check_cancellation()) return; + + if (heap->shenandoahPolicy()->should_start_update_refs()) { + + VM_ShenandoahInitUpdateRefs init_update_refs; + heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross); + heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_update_refs_gross); + VMThread::execute(&init_update_refs); + heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_update_refs_gross); + heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross); + + { + GCTraceTime time("Concurrent update references ", ShenandoahLogInfo, gc_timer, gc_tracer->gc_id(), true); + heap->concurrent_update_heap_references(); + } + + // Allocations happen during update-refs, record peak after the phase: + heap->shenandoahPolicy()->record_peak_occupancy(); + + clear_full_gc = false; + if (heap->cancelled_concgc()) { + heap->shenandoahPolicy()->record_uprefs_cancelled(); + if (_full_gc_cause == GCCause::_allocation_failure && + heap->shenandoahPolicy()->handover_cancelled_uprefs()) { + clear_full_gc = true; + heap->shenandoahPolicy()->record_uprefs_degenerated(); + } else { + heap->gc_timer()->register_gc_end(); + return; + } + } else { + heap->shenandoahPolicy()->record_uprefs_success(); + } + + VM_ShenandoahFinalUpdateRefs final_update_refs; + + heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause_gross); + heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_update_refs_gross); + VMThread::execute(&final_update_refs); + heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_update_refs_gross); + heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause_gross); + } + // Prepare for the next normal cycle: if (check_cancellation()) return; + if (clear_full_gc) { + reset_full_gc(); + } + { GCTraceTime time("Concurrent reset bitmaps", ShenandoahLogInfo, gc_timer, 
gc_tracer->gc_id()); heap->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::reset_bitmaps); @@ -194,6 +255,12 @@ heap->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::reset_bitmaps); } + // Allocations happen during bitmap cleanup, record peak after the phase: + heap->shenandoahPolicy()->record_peak_occupancy(); + + // Cycle is complete + heap->shenandoahPolicy()->record_cycle_end(); + gc_timer->register_gc_end(); gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); } diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp @@ -42,6 +42,7 @@ #include "gc_implementation/shenandoah/shenandoahHumongous.hpp" #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp" #include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp" +#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp" #include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp" @@ -265,6 +266,7 @@ _concurrent_mark_in_progress(0), _evacuation_in_progress(0), _full_gc_in_progress(false), + _update_refs_in_progress(false), _free_regions(NULL), _collection_set(NULL), _bytes_allocated_since_cm(0), @@ -1109,6 +1111,8 @@ log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id()); if (!cancelled_concgc()) { + // Allocations might have happened before we STWed here, record peak: + shenandoahPolicy()->record_peak_occupancy(); recycle_dirty_regions(); @@ -2227,6 +2231,14 @@ return _full_gc_in_progress; } +void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) { + _update_refs_in_progress = in_progress; +} + +bool ShenandoahHeap::is_update_refs_in_progress() const { + 
return _update_refs_in_progress; +} + class NMethodOopInitializer : public OopClosure { private: ShenandoahHeap* _heap; @@ -2303,6 +2315,144 @@ return cl.garbage(); } +ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() : + _heap(ShenandoahHeap::heap()) {} + +class ShenandoahUpdateHeapRefsTask : public AbstractGangTask { +private: + ShenandoahHeap* _heap; + ShenandoahHeapRegionSet* _regions; + +public: + ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions) : + AbstractGangTask("Concurrent Update References Task"), + _heap(ShenandoahHeap::heap()), + _regions(regions) { + } + + void work(uint worker_id) { + ShenandoahUpdateHeapRefsClosure cl; + ShenandoahHeapRegion* r = _regions->claim_next(); + while (r != NULL) { + if (! _heap->in_collection_set(r) && + ! r->is_empty()) { + _heap->marked_object_oop_safe_iterate(r, &cl); + } else if (_heap->in_collection_set(r)) { + HeapWord* bottom = r->bottom(); + HeapWord* top = _heap->complete_top_at_mark_start(r->bottom()); + if (top > bottom) { + _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top)); + } + } + if (_heap->cancelled_concgc()) { + return; + } + r = _regions->claim_next(); + } + } +}; + +void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions) { + ShenandoahUpdateHeapRefsTask task(update_regions); + workers()->run_task(&task); +} + +void ShenandoahHeap::concurrent_update_heap_references() { + _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs); + ShenandoahHeapRegionSet* update_regions = regions(); + update_regions->clear_current_index(); + update_heap_references(update_regions); + _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs); +} + +void ShenandoahHeap::prepare_update_refs() { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + set_evacuation_in_progress_at_safepoint(false); + set_update_refs_in_progress(true); + ensure_parsability(true); + for (uint 
i = 0; i < _num_regions; i++) { + ShenandoahHeapRegion* r = _ordered_regions->get(i); + r->set_concurrent_iteration_safe_limit(r->top()); + } +} + +void ShenandoahHeap::finish_update_refs() { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + + if (cancelled_concgc()) { + // Finish updating references where we left off. + clear_cancelled_concgc(); + ShenandoahHeapRegionSet* update_regions = regions(); + update_heap_references(update_regions); + } + + assert(! cancelled_concgc(), "Should have been done right before"); + concurrentMark()->update_roots(ShenandoahCollectorPolicy::final_update_refs_roots); + + // Allocations might have happened before we STWed here, record peak: + shenandoahPolicy()->record_peak_occupancy(); + + recycle_dirty_regions(); + set_need_update_refs(false); + + if (ShenandoahVerify) { + verify_update_refs(); + } + + { + // Rebuild the free set + ShenandoahHeapLock hl(this); + _free_regions->clear(); + size_t end = _ordered_regions->active_regions(); + for (size_t i = 0; i < end; i++) { + ShenandoahHeapRegion* r = _ordered_regions->get(i); + if (!r->is_humongous()) { + assert (!in_collection_set(r), "collection set should be clear"); + _free_regions->add_region(r); + } + } + } + set_update_refs_in_progress(false); +} + +class ShenandoahVerifyUpdateRefsClosure : public ExtendedOopClosure { +private: + template + void do_oop_work(T* p) { + T o = oopDesc::load_heap_oop(p); + if (! oopDesc::is_null(o)) { + oop obj = oopDesc::decode_heap_oop_not_null(o); + guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), + "must not be forwarded"); + } + } +public: + void do_oop(oop* p) { do_oop_work(p); } + void do_oop(narrowOop* p) { do_oop_work(p); } +}; + +void ShenandoahHeap::verify_update_refs() { + + ensure_parsability(false); + + ShenandoahVerifyUpdateRefsClosure cl; + + // Verify roots. 
+ { + CodeBlobToOopClosure blobsCl(&cl, false); + CLDToOopClosure cldCl(&cl); + ClassLoaderDataGraph::clear_claimed_marks(); + ShenandoahRootProcessor rp(this, 1); + rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0); + } + + // Verify heap. + for (uint i = 0; i < num_regions(); i++) { + ShenandoahHeapRegion* r = regions()->get(i); + marked_object_oop_iterate(r, &cl); + } +} + #ifdef ASSERT void ShenandoahHeap::assert_heaplock_owned_by_current_thread() { assert(_heap_lock == locked, "must be locked"); diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp @@ -179,6 +179,7 @@ unsigned int _concurrent_mark_in_progress; bool _full_gc_in_progress; + bool _update_refs_in_progress; unsigned int _evacuation_in_progress; bool _need_update_refs; @@ -296,6 +297,12 @@ void prepare_for_concurrent_evacuation(); void evacuate_and_update_roots(); + void update_heap_references(ShenandoahHeapRegionSet* regions); + void concurrent_update_heap_references(); + void prepare_update_refs(); + void finish_update_refs(); + void verify_update_refs(); + private: void set_evacuation_in_progress(bool in_progress); public: @@ -306,6 +313,9 @@ void set_full_gc_in_progress(bool in_progress); bool is_full_gc_in_progress() const; + void set_update_refs_in_progress(bool in_progress); + bool is_update_refs_in_progress() const; + inline bool need_update_refs() const; void set_need_update_refs(bool update_refs); @@ -410,9 +420,26 @@ // TODO: consider moving this into ShenandoahHeapRegion. 
+private: + template + inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); + + template + inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); + +public: template inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl); + template + inline void marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl); + + template + inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl); + + template + inline void marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl); + GCTimer* gc_timer() const; GCTracer* tracer(); diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp @@ -191,6 +191,14 @@ if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful. return forwarded_oop; } else { + // Note: we used to assert the following here. This doesn't work because sometimes, during + // marking/updating-refs, it can happen that a Java thread beats us with an arraycopy, + // which first copies the array, which potentially contains from-space refs, and only afterwards + // updates all from-space refs to to-space refs, which leaves a short window where the new array + // elements can be from-space. 
+ // assert(oopDesc::is_null(result) || + // oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)), + // "expect not forwarded"); return NULL; } } else { @@ -350,6 +358,16 @@ template inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) { + marked_object_iterate(region, cl, region->top()); +} + +template +inline void ShenandoahHeap::marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl) { + marked_object_iterate(region, cl, region->concurrent_iteration_safe_limit()); +} + +template +inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) { assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj"); CMBitMap* mark_bit_map = _complete_mark_bit_map; @@ -359,7 +377,6 @@ size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */; HeapWord* start = region->bottom() + BrooksPointer::word_size(); - HeapWord* limit = region->top(); HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end()); HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end); @@ -436,4 +453,58 @@ cl->do_object(obj); } +template +class ShenandoahObjectToOopClosure : public ObjectClosure { + T* _cl; +public: + ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {} + + void do_object(oop obj) { + obj->oop_iterate(_cl); + } +}; + +template +class ShenandoahObjectToOopBoundedClosure : public ObjectClosure { + T* _cl; + MemRegion _bounds; +public: + ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) : + _cl(cl), _bounds(bottom, top) {} + + void do_object(oop obj) { + obj->oop_iterate(_cl, _bounds); + } +}; + +template +inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) { + if (region->is_humongous()) { + HeapWord* bottom = region->bottom(); + if (top > bottom) { + // Go to start of 
humongous region. + uint idx = region->region_number(); + while (! region->is_humongous_start()) { + assert(idx > 0, "sanity"); + idx--; + region = _ordered_regions->get(idx); + } + ShenandoahObjectToOopBoundedClosure objs(cl, bottom, top); + marked_object_iterate(region, &objs); + } + } else { + ShenandoahObjectToOopClosure objs(cl); + marked_object_iterate(region, &objs, top); + } +} + +template +inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl) { + marked_object_oop_iterate(region, cl, region->top()); +} + +template +inline void ShenandoahHeap::marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl) { + marked_object_oop_iterate(region, cl, region->concurrent_iteration_safe_limit()); +} #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp @@ -61,7 +61,6 @@ assert(!PerfDataManager::exists(fullname), "must not exist"); _regions_data[i] = PerfDataManager::create_long_variable(SUN_GC, data_name, PerfData::U_None, CHECK); - } } } @@ -79,8 +78,9 @@ ShenandoahHeap* heap = ShenandoahHeap::heap(); jlong status = 0; - if (heap->concurrent_mark_in_progress()) status |= 1; - if (heap->is_evacuation_in_progress()) status |= 2; + if (heap->concurrent_mark_in_progress()) status |= 1 << 0; + if (heap->is_evacuation_in_progress()) status |= 1 << 1; + if (heap->is_update_refs_in_progress()) status |= 1 << 2; _status->set_value(status); _timestamp->set_value(os::elapsed_counter()); diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp --- 
a/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp @@ -38,12 +38,13 @@ * - sun.gc.shenandoah.regions.status current GC status: * - bit 0 set when marking in progress * - bit 1 set when evacuation in progress + * - bit 2 set when update refs in progress * * one variable counter per region, with $max_regions (see above) counters: * - sun.gc.shenandoah.regions.region.$i.data * where $ is the region number from 0 <= i < $max_regions * - * in the following format: + * .data is in the following format: * - bits 0-28 used memory in kilobytes * - bits 29-58 live memory in kilobytes * - bits 58-63 status diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp @@ -111,4 +111,18 @@ virtual bool do_metadata() { return true; } }; +class ShenandoahUpdateHeapRefsClosure : public ExtendedOopClosure { +private: + ShenandoahHeap* _heap; +public: + ShenandoahUpdateHeapRefsClosure(); + + template + void do_oop_nv(T* p); + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } +}; + + #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp @@ -32,4 +32,9 @@ ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue); } +template +inline void ShenandoahUpdateHeapRefsClosure::do_oop_nv(T* p) { + _heap->maybe_update_oop_ref(p); +} + #endif // 
SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp @@ -72,6 +72,10 @@ "aggressive (run concurrent GC continuously, evacuate everything), " \ "Defaults to adaptive") \ \ + experimental(ccstr, ShenandoahUpdateRefsEarly, "adaptive", \ + "Run a separate concurrent reference updating phase after" \ + "concurrent evacuation. Possible values: 'on', 'off', 'adaptive'")\ + \ product(uintx, ShenandoahRefProcFrequency, 5, \ "How often should (weak, soft, etc) references be processed. " \ "References get processed at every Nth GC cycle. " \ @@ -121,6 +125,18 @@ "Applies to Shenandoah GC dynamic Heuristic mode only " \ "(ignored otherwise). Defauls to 0%.") \ \ + experimental(uintx, ShenandoahMergeUpdateRefsMinGap, 100, \ + "If GC is currently running in separate update-refs mode " \ + "this numbers gives the threshold when to switch to " \ + "merged update-refs mode. Number is percentage relative to" \ + "duration(marking)+duration(update-refs).") \ + \ + experimental(uintx, ShenandoahMergeUpdateRefsMaxGap, 200, \ + "If GC is currently running in merged update-refs mode " \ + "this numbers gives the threshold when to switch to " \ + "separate update-refs mode. 
Number is percentage relative " \ + "to duration(marking)+duration(update-refs).") \ + \ experimental(double, ShenandoahGCWorkerPerJavaThread, 0.5, \ "Set GC worker to Java thread ratio when " \ "UseDynamicNumberOfGCThreads is enabled") \ diff --git a/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp b/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp --- a/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp +++ b/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp @@ -28,11 +28,13 @@ class ShenandoahMarkUpdateRefsMetadataClosure; class ShenandoahMarkRefsClosure; class ShenandoahMarkRefsMetadataClosure; +class ShenandoahUpdateHeapRefsClosure; #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(f) \ f(ShenandoahMarkUpdateRefsClosure,_nv) \ f(ShenandoahMarkUpdateRefsMetadataClosure,_nv) \ f(ShenandoahMarkRefsClosure,_nv) \ - f(ShenandoahMarkRefsMetadataClosure,_nv) + f(ShenandoahMarkRefsMetadataClosure,_nv) \ + f(ShenandoahUpdateHeapRefsClosure,_nv) #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_SPECIALIZED_OOP_CLOSURES_HPP diff --git a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp b/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp --- a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp +++ b/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.cpp @@ -131,6 +131,26 @@ sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause); } +void VM_ShenandoahInitUpdateRefs::doit() { + ShenandoahHeap *sh = ShenandoahHeap::heap(); + GCTraceTime time("Pause Init Update Refs", ShenandoahLogInfo, sh->gc_timer(), sh->tracer()->gc_id()); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::init_update_refs); + sh->prepare_update_refs(); + 
sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::init_update_refs); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause); +} + +void VM_ShenandoahFinalUpdateRefs::doit() { + ShenandoahHeap *sh = ShenandoahHeap::heap(); + GCTraceTime time("Pause Final Update Refs", ShenandoahLogInfo, sh->gc_timer(), sh->tracer()->gc_id()); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::final_update_refs); + sh->finish_update_refs(); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::final_update_refs); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause); +} + void VM_ShenandoahVerifyHeapAfterEvacuation::doit() { ShenandoahHeap *sh = ShenandoahHeap::heap(); diff --git a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.hpp b/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.hpp --- a/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.hpp +++ b/src/share/vm/gc_implementation/shenandoah/vm_operations_shenandoah.hpp @@ -33,6 +33,8 @@ // - VM_ShenandoahInitMark: initiate concurrent marking // - VM_ShenandoahReferenceOperation: // - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation +// - VM_ShenandoahInitUpdateRefs: initiate update references +// - VM_ShenandoahFinalUpdateRefs: finish up update references // - VM_ShenandoahFullGC: do full GC class VM_ShenandoahOperation : public VM_Operation { @@ -75,6 +77,22 @@ virtual void doit(); }; +class VM_ShenandoahInitUpdateRefs: public VM_ShenandoahOperation { +public: + VM_ShenandoahInitUpdateRefs() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitUpdateRefs; } + const char* name() const { return "Shenandoah Init Update References"; } + virtual void doit(); +}; + +class VM_ShenandoahFinalUpdateRefs: public 
VM_ShenandoahOperation { +public: + VM_ShenandoahFinalUpdateRefs() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalUpdateRefs; } + const char* name() const { return "Shenandoah Final Update References"; } + virtual void doit(); +}; + class VM_ShenandoahVerifyHeapAfterEvacuation: public VM_ShenandoahOperation { public: VM_ShenandoahVerifyHeapAfterEvacuation() : VM_ShenandoahOperation() {}; diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp --- a/src/share/vm/runtime/vm_operations.hpp +++ b/src/share/vm/runtime/vm_operations.hpp @@ -97,6 +97,8 @@ template(ShenandoahInitMark) \ template(ShenandoahFinalMarkStartEvac) \ template(ShenandoahVerifyHeapAfterEvacuation) \ + template(ShenandoahInitUpdateRefs) \ + template(ShenandoahFinalUpdateRefs) \ template(Exit) \ template(LinuxDllLoad) \ template(RotateGCLog) \