# HG changeset patch # User rkennke # Date 1490712480 -7200 # Tue Mar 28 16:48:00 2017 +0200 # Node ID fa8e8c6307b35741ba87ecc46564196e0c450226 # Parent 01d25ed091668ac76e9e0cc5286df9d4ce8f7b2f imported patch update-refs.patch diff --git a/src/share/vm/gc/shenandoah/shenandoahBarrierSet.cpp b/src/share/vm/gc/shenandoah/shenandoahBarrierSet.cpp --- a/src/share/vm/gc/shenandoah/shenandoahBarrierSet.cpp +++ b/src/share/vm/gc/shenandoah/shenandoahBarrierSet.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "runtime/interfaceSupport.hpp" @@ -187,7 +188,14 @@ } bool ShenandoahBarrierSet::need_update_refs_barrier() { - return UseShenandoahMatrix || (_heap->concurrent_mark_in_progress() && _heap->need_update_refs()); + if (UseShenandoahMatrix) { + return true; + } + if (_heap->shenandoahPolicy()->update_refs_early()) { + return _heap->is_update_refs_in_progress(); + } else { + return _heap->concurrent_mark_in_progress() && _heap->need_update_refs(); + } } void ShenandoahBarrierSet::write_ref_array_work(MemRegion r) { diff --git a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp --- a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -103,6 +103,10 @@ virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0; + virtual bool update_refs_early() { + return ShenandoahUpdateRefsEarly; + } + virtual bool should_start_partial_gc() { return false; } @@ -786,6 +790,10 @@ _phase_names[conc_mark] = "Concurrent Marking"; _phase_names[conc_evac] = "Concurrent Evacuation"; + _phase_names[conc_update_refs] = "Concurrent Update References"; + 
_phase_names[pre_update_refs] = "Pause Pre Update References"; + _phase_names[post_update_refs] = "Pause Post Update References"; + if (ShenandoahGCHeuristics != NULL) { if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { log_info(gc, init)("Shenandoah heuristics: aggressive"); @@ -885,6 +893,10 @@ return _heuristics->handover_cancelled_marking(); } +bool ShenandoahCollectorPolicy::update_refs_early() { + return _heuristics->update_refs_early(); +} + void ShenandoahCollectorPolicy::record_cm_success() { _heuristics->record_cm_success(); _successful_cm++; diff --git a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp --- a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp +++ b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp @@ -112,6 +112,10 @@ full_gc_adjust_pointers, full_gc_copy_objects, + conc_update_refs, + pre_update_refs, + post_update_refs, + partial_gc, _num_phases }; @@ -183,6 +187,11 @@ void record_bytes_end_CM(size_t bytes); bool should_start_concurrent_mark(size_t used, size_t capacity); bool should_start_partial_gc(); + + // Returns true when there should be a separate concurrent reference + // updating phase after evacuation. + bool update_refs_early(); + bool handover_cancelled_marking(); void record_cm_cancelled(); diff --git a/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp --- a/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp +++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp @@ -173,6 +173,24 @@ heap->do_evacuation(); } + // Do an update-refs phase if required. 
+ if (check_cancellation()) return; + + if (heap->shenandoahPolicy()->update_refs_early()) { + + VM_ShenandoahPreUpdateRoots pre_update_roots; + VMThread::execute(&pre_update_roots); + + { + GCTraceTime(Info, gc) time("Concurrent update references ", gc_timer, GCCause::_no_gc, true); + heap->concurrent_update_heap_references(); + } + if (check_cancellation()) return; + + VM_ShenandoahPostUpdateRoots post_update_roots; + VMThread::execute(&post_update_roots); + } + // Prepare for the next normal cycle: if (check_cancellation()) return; diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp --- a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp +++ b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp @@ -294,6 +294,7 @@ _concurrent_mark_in_progress(0), _evacuation_in_progress(0), _full_gc_in_progress(false), + _update_refs_in_progress(false), _free_regions(NULL), _collection_set(NULL), _bytes_allocated_since_cm(0), @@ -2381,6 +2382,14 @@ return _full_gc_in_progress; } +void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) { + _update_refs_in_progress = in_progress; +} + +bool ShenandoahHeap::is_update_refs_in_progress() const { + return _update_refs_in_progress; +} + class NMethodOopInitializer : public OopClosure { private: ShenandoahHeap* _heap; @@ -2581,6 +2590,91 @@ partial_gc()->do_partial_collection(); } +class ShenandoahUpdateHeapRefsClosure : public ExtendedOopClosure { + ShenandoahHeap* _heap; +public: + ShenandoahUpdateHeapRefsClosure() : + _heap(ShenandoahHeap::heap()) {} + + template <class T> + inline void do_oop_nv(T* p) { + // tty->print_cr("updating: "PTR_FORMAT, p2i(p)); + _heap->maybe_update_oop_ref(p); + } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } +}; + +class ShenandoahUpdateHeapObjectsClosure : public ObjectClosure { +private: + ShenandoahUpdateHeapRefsClosure _cl; +public: + void do_object(oop obj) { + obj->oop_iterate(&_cl); + } +}; + 
+class ShenandoahUpdateHeapRefsTask : public AbstractGangTask { +private: + ShenandoahHeap* _heap; + ShenandoahHeapRegionSet* _regions; + +public: + ShenandoahUpdateHeapRefsTask() : + AbstractGangTask("Concurrent Update References Task"), + _heap(ShenandoahHeap::heap()), + _regions(ShenandoahHeap::heap()->regions()) { + _regions->clear_current_index(); + } + + void work(uint worker_id) { + ShenandoahUpdateHeapObjectsClosure cl; + ShenandoahHeapRegion* r = _regions->claim_next(); + while (r != NULL) { + if (! r->is_humongous_continuation() && + ! _heap->in_collection_set(r) && + ! r->is_empty()) { + HeapWord* limit = r->concurrent_iteration_safe_limit(); + _heap->marked_object_iterate(r, &cl, limit); + } else if (_heap->in_collection_set(r)) { + HeapWord* bottom = r->bottom(); + HeapWord* top = _heap->complete_top_at_mark_start(r->bottom()); + if (top > bottom) { + _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top)); + } + } + r = _regions->claim_next(); + } + } +}; + +void ShenandoahHeap::concurrent_update_heap_references() { + _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs); + ShenandoahUpdateHeapRefsTask task; + workers()->run_task(&task); + _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs); +} + +void ShenandoahHeap::prepare_update_refs() { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + set_evacuation_in_progress_at_safepoint(false); + set_update_refs_in_progress(true); + ensure_parsability(true); + for (uint i = 0; i < _num_regions; i++) { + ShenandoahHeapRegion* r = _ordered_regions->get(i); + r->set_concurrent_iteration_safe_limit(r->top()); + } +} + +void ShenandoahHeap::finish_update_refs() { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + concurrentMark()->update_roots(); + recycle_dirty_regions(); + set_need_update_refs(false); + set_update_refs_in_progress(false); +} + #ifdef ASSERT void 
ShenandoahHeap::assert_heaplock_owned_by_current_thread() { assert(_heap_lock == locked, "must be locked"); diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp --- a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp +++ b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp @@ -195,6 +195,7 @@ unsigned int _concurrent_mark_in_progress; bool _full_gc_in_progress; + bool _update_refs_in_progress; unsigned int _evacuation_in_progress; bool _need_update_refs; @@ -304,6 +305,11 @@ void evacuate_and_update_roots(); void do_partial_collection(); + + void concurrent_update_heap_references(); + void prepare_update_refs(); + void finish_update_refs(); + private: void set_evacuation_in_progress(bool in_progress); public: @@ -314,6 +320,9 @@ void set_full_gc_in_progress(bool in_progress); bool is_full_gc_in_progress() const; + void set_update_refs_in_progress(bool in_progress); + bool is_update_refs_in_progress() const; + inline bool need_update_refs() const; void set_need_update_refs(bool update_refs); @@ -429,6 +438,9 @@ template <class T> inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl); + template <class T> + inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); + GCTimer* gc_timer() const; void swap_mark_bitmaps(); diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp b/src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp --- a/src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp @@ -197,6 +197,8 @@ if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful. 
return forwarded_oop; } else { + assert(oopDesc::unsafe_equals(result, ShenandoahBarrierSet::resolve_oop_static_not_null(result)), + "expect not forwarded"); return NULL; } } else { @@ -420,6 +422,11 @@ template <class T> inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) { + marked_object_iterate(region, cl, region->top()); +} + +template <class T> +inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) { assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj"); CMBitMap* mark_bit_map = _complete_mark_bit_map; @@ -429,7 +436,6 @@ size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */; HeapWord* start = region->bottom() + BrooksPointer::word_size(); - HeapWord* limit = region->top(); HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end()); HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end); diff --git a/src/share/vm/gc/shenandoah/shenandoah_globals.hpp b/src/share/vm/gc/shenandoah/shenandoah_globals.hpp --- a/src/share/vm/gc/shenandoah/shenandoah_globals.hpp +++ b/src/share/vm/gc/shenandoah/shenandoah_globals.hpp @@ -77,6 +77,10 @@ "dynamic, adaptive, aggressive." \ "Defaults to adaptive") \ \ + experimental(bool, ShenandoahUpdateRefsEarly, false, \ + "Run a separate concurrent reference updating phase after " \ + "concurrent evacuation") \ + \ product(uintx, ShenandoahRefProcFrequency, 5, \ "How often should (weak, soft, etc) references be processed. " \ "References get processed at every Nth GC cycle. 
" \ diff --git a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp --- a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp +++ b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp @@ -139,6 +139,28 @@ sh->shenandoahPolicy()->record_gc_end(); } +void VM_ShenandoahPreUpdateRoots::doit() { + GCIdMark gc_id_mark(_gc_id); + ShenandoahHeap *sh = ShenandoahHeap::heap(); + GCTraceTime(Info, gc) time("Pause Pre Update Refs", sh->gc_timer()); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::pre_update_refs); + sh->prepare_update_refs(); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::pre_update_refs); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause); +} + +void VM_ShenandoahPostUpdateRoots::doit() { + GCIdMark gc_id_mark(_gc_id); + ShenandoahHeap *sh = ShenandoahHeap::heap(); + GCTraceTime(Info, gc) time("Pause Post Update Refs", sh->gc_timer()); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::total_pause); + sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::post_update_refs); + sh->finish_update_refs(); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::post_update_refs); + sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::total_pause); +} + void VM_ShenandoahVerifyHeapAfterEvacuation::doit() { GCIdMark gc_id_mark(_gc_id); ShenandoahHeap *sh = ShenandoahHeap::heap(); diff --git a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp --- a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp +++ b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp @@ -83,6 +83,22 @@ virtual void doit(); }; +class VM_ShenandoahPreUpdateRoots: public VM_ShenandoahOperation { +public: + 
VM_ShenandoahPreUpdateRoots() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahPreUpdateRoots; } + const char* name() const { return "Shenandoah Update Root References Pre"; } + virtual void doit(); +}; + +class VM_ShenandoahPostUpdateRoots: public VM_ShenandoahOperation { +public: + VM_ShenandoahPostUpdateRoots() : VM_ShenandoahOperation() {}; + VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahPostUpdateRoots; } + const char* name() const { return "Shenandoah Update Root References Post"; } + virtual void doit(); +}; + class VM_ShenandoahVerifyHeapAfterEvacuation: public VM_ShenandoahOperation { public: VM_ShenandoahVerifyHeapAfterEvacuation() : VM_ShenandoahOperation() {}; diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp --- a/src/share/vm/runtime/vm_operations.hpp +++ b/src/share/vm/runtime/vm_operations.hpp @@ -101,6 +101,8 @@ template(ShenandoahStartEvacuation) \ template(ShenandoahVerifyHeapAfterEvacuation) \ template(ShenandoahPartialGC) \ + template(ShenandoahPreUpdateRoots) \ + template(ShenandoahPostUpdateRoots) \ template(Exit) \ template(LinuxDllLoad) \ template(RotateGCLog) \