# HG changeset patch # User rkennke # Date 1524771828 -7200 # Thu Apr 26 21:43:48 2018 +0200 # Node ID c26be43bb3064d78c586b0a44e73dc26fd4aafc4 # Parent 7d099bf9f61cca4da250939ede0e5c58853a4c34 Fold Partial GC into Traversal GC diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp @@ -1117,7 +1117,7 @@ break; } - if (bs->kind() == BarrierSet::Shenandoah && !ShenandoahSATBBarrier && !ShenandoahConditionalSATBBarrier) { + if (bs->kind() == BarrierSet::Shenandoah && !ShenandoahSATBBarrier) { break; } diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp @@ -88,14 +88,6 @@ Register tmp, bool tosca_live, bool expand_call) { - if (ShenandoahConditionalSATBBarrier) { - Label done; - Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ ldrb(tmp, gc_state); - __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done); - satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call); - __ bind(done); - } if (ShenandoahSATBBarrier) { satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call); } @@ -264,7 +256,7 @@ } void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); + assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); assert(dst != rscratch1, "different regs"); assert(dst != rscratch2, "Need rscratch2"); @@ -277,7 
+269,7 @@ // Now check if evacuation is in progress. read_barrier_not_null(masm, dst); - __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL); + __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); __ tst(rscratch1, rscratch2); __ br(Assembler::EQ, done); @@ -306,13 +298,11 @@ } void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { - if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahStoreValEnqueueBarrier) { Label is_null; __ cbz(dst, is_null); write_barrier_impl(masm, dst); __ bind(is_null); - } - if (ShenandoahStoreValEnqueueBarrier) { // Save possibly live regs. RegSet live_regs = RegSet::range(r0, r4) - dst; __ push(live_regs, sp); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -4086,7 +4086,7 @@ #ifdef INCLUDE_ALL_GCS void MacroAssembler::shenandoah_write_barrier(Register dst) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); + assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); assert(dst != rscratch1, "need rscratch1"); assert(dst != rscratch2, "need rscratch2"); @@ -4102,7 +4102,7 @@ } // Evac-check ... 
- mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL); + mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); tst(rscratch1, rscratch2); br(Assembler::EQ, done); diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -5158,7 +5158,7 @@ StubRoutines::_montgomerySquare = g.generate_multiply(); } - if (UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier)) { + if (UseShenandoahGC && ShenandoahWriteBarrier) { StubRoutines::aarch64::_shenandoah_wb = generate_shenandoah_wb(false, true); StubRoutines::_shenandoah_wb_C = generate_shenandoah_wb(true, !ShenandoahWriteBarrierCsetTestInIR); } diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp @@ -1573,7 +1573,7 @@ break; } - if (bs->kind() == BarrierSet::Shenandoah && !(ShenandoahSATBBarrier || ShenandoahConditionalSATBBarrier || ShenandoahStoreValEnqueueBarrier)) { + if (bs->kind() == BarrierSet::Shenandoah && !(ShenandoahSATBBarrier || ShenandoahStoreValEnqueueBarrier)) { break; } @@ -1598,7 +1598,7 @@ if (UseShenandoahGC) { Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL); + __ testb(gc_state, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL); __ jcc(Assembler::zero, done); } else { assert(UseG1GC, "Should be"); diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp --- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp +++ 
b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp @@ -161,14 +161,6 @@ bool tosca_live, bool expand_call) { - if (ShenandoahConditionalSATBBarrier) { - Label done; - Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ testb(gc_state, ShenandoahHeap::MARKING); - __ jcc(Assembler::zero, done); // Skip SATB barrier when conc-mark is not active - satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call); - __ bind(done); - } if (ShenandoahSATBBarrier) { satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call); } @@ -362,14 +354,14 @@ } void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); + assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); #ifdef _LP64 assert(dst != rscratch1, "different regs"); Label done; Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL); + __ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); // Now check if evacuation is in progress. 
read_barrier_not_null(masm, dst); @@ -447,26 +439,24 @@ } void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { - if (ShenandoahStoreValReadBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier) { storeval_barrier_impl(masm, dst, tmp); } } void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) { - assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); + assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); if (dst == noreg) return; #ifdef _LP64 - if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahStoreValEnqueueBarrier) { Label is_null; __ testptr(dst, dst); __ jcc(Assembler::zero, is_null); write_barrier_impl(masm, dst); __ bind(is_null); - } - if (ShenandoahStoreValEnqueueBarrier) { // The set of registers to be saved+restored is the same as in the write-barrier above. // Those are the commonly used registers in the interpreter. 
__ push(rbx); diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -5371,13 +5371,13 @@ } #else void MacroAssembler::shenandoah_write_barrier(Register dst) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); + assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); Label done; // Check for evacuation-in-progress Address gc_state(r15_thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL); + testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); // The read-barrier. if (ShenandoahWriteBarrierRB) { diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -5137,7 +5137,7 @@ throw_NullPointerException_at_call)); // entry points that are platform specific - if (UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier)) { + if (UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier)) { StubRoutines::x86::_shenandoah_wb = generate_shenandoah_wb(false, true); StubRoutines::_shenandoah_wb_C = generate_shenandoah_wb(true, !ShenandoahWriteBarrierCsetTestInIR); } diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp --- a/src/hotspot/share/c1/c1_LIR.hpp +++ b/src/hotspot/share/c1/c1_LIR.hpp @@ -1460,7 +1460,7 @@ public: LIR_OpShenandoahWriteBarrier(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool need_null_check) : LIR_Op1(lir_shenandoah_wb, obj, result, T_OBJECT, lir_patch_none, info), 
_need_null_check(need_null_check) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); + assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); } bool need_null_check() const { return _need_null_check; } virtual void emit_code(LIR_Assembler* masm); diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp @@ -1650,44 +1650,6 @@ void LIRGenerator::Shenandoah_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info) { - if (ShenandoahConditionalSATBBarrier) { - LIR_Opr gc_state_addr = new_pointer_register(); - __ move(LIR_OprFact::intptrConst((intptr_t) ShenandoahHeap::gc_state_addr()), gc_state_addr); - LIR_Opr gc_state = new_register(T_INT); - __ move(new LIR_Address(gc_state_addr, T_BYTE), gc_state); - __ logical_and(gc_state, LIR_OprFact::intConst(ShenandoahHeap::MARKING), gc_state); - __ cmp(lir_cond_equal, gc_state, LIR_OprFact::intConst(0)); - - LIR_PatchCode pre_val_patch_code = lir_patch_none; - - CodeStub* slow; - - if (do_load) { - assert(pre_val == LIR_OprFact::illegalOpr, "sanity"); - assert(addr_opr != LIR_OprFact::illegalOpr, "sanity"); - - if (patch) - pre_val_patch_code = lir_patch_normal; - - pre_val = new_register(T_OBJECT); - - if (!addr_opr->is_address()) { - assert(addr_opr->is_register(), "must be"); - addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT)); - } - slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info); - } else { - assert(addr_opr == LIR_OprFact::illegalOpr, "sanity"); - assert(pre_val->is_register(), "must be"); - assert(pre_val->type() == T_OBJECT, "must be an object"); - assert(info == NULL, "sanity"); - - slow = new G1PreBarrierStub(pre_val); - } - - __ branch(lir_cond_notEqual, 
T_CHAR, slow); - __ branch_destination(slow->continuation()); - } if (ShenandoahSATBBarrier) { G1BarrierSet_pre_barrier(addr_opr, pre_val, do_load, patch, info); } @@ -2077,7 +2039,7 @@ } LIR_Opr LIRGenerator::shenandoah_write_barrier_impl(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); + assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); LIR_Opr result = new_register(T_OBJECT); __ shenandoah_wb(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check); return result; @@ -2085,7 +2047,7 @@ LIR_Opr LIRGenerator::shenandoah_storeval_barrier(LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { if (UseShenandoahGC) { - if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahStoreValEnqueueBarrier) { // TODO: Maybe we can simply avoid this stuff on constants? if (! 
obj->is_register()) { LIR_Opr result = new_register(T_OBJECT); @@ -2093,8 +2055,6 @@ obj = result; } obj = shenandoah_write_barrier_impl(obj, info, need_null_check); - } - if (ShenandoahStoreValEnqueueBarrier) { G1BarrierSet_pre_barrier(LIR_OprFact::illegalOpr, obj, false, false, NULL); } if (ShenandoahStoreValReadBarrier) { diff --git a/src/hotspot/share/gc/shared/markBitMap.cpp b/src/hotspot/share/gc/shared/markBitMap.cpp --- a/src/hotspot/share/gc/shared/markBitMap.cpp +++ b/src/hotspot/share/gc/shared/markBitMap.cpp @@ -79,3 +79,12 @@ _bm.clear_large_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end())); } + +void MarkBitMap::copy_from(MarkBitMap* other, MemRegion mr) { + guarantee(startWord() == other->startWord(), "bitmaps must cover same region"); + guarantee(endWord() == other->endWord(), "bitmaps must cover same region"); + mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); + size_t start_offset = heapWordToOffset(mr.start()); + size_t end_offset = heapWordToOffset(mr.end()); + _bm.copy_from(other->_bm, start_offset, end_offset); +} diff --git a/src/hotspot/share/gc/shared/markBitMap.hpp b/src/hotspot/share/gc/shared/markBitMap.hpp --- a/src/hotspot/share/gc/shared/markBitMap.hpp +++ b/src/hotspot/share/gc/shared/markBitMap.hpp @@ -107,6 +107,9 @@ // Clear range. For larger regions, use *_large. void clear_range(MemRegion mr); void clear_range_large(MemRegion mr); + + // Copies a part of the 'other' bitmap into the corresponding part of this bitmap.
+ void copy_from(MarkBitMap* other, MemRegion mr); }; #endif // SHARE_VM_GC_SHARED_CMBITMAP_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -98,12 +98,10 @@ // C2 barrier verification is only reliable when all default barriers are enabled if (ShenandoahVerifyOptoBarriers && (!FLAG_IS_DEFAULT(ShenandoahSATBBarrier) || - !FLAG_IS_DEFAULT(ShenandoahConditionalSATBBarrier) || !FLAG_IS_DEFAULT(ShenandoahKeepAliveBarrier) || !FLAG_IS_DEFAULT(ShenandoahWriteBarrier) || !FLAG_IS_DEFAULT(ShenandoahReadBarrier) || !FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier) || - !FLAG_IS_DEFAULT(ShenandoahStoreValWriteBarrier) || !FLAG_IS_DEFAULT(ShenandoahStoreValReadBarrier) || !FLAG_IS_DEFAULT(ShenandoahCASBarrier) || !FLAG_IS_DEFAULT(ShenandoahAcmpBarrier) || diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp @@ -28,6 +28,8 @@ #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "memory/resourceArea.hpp" void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) { @@ -44,6 +46,10 @@ msg.append(" %3s marked complete\n", heap->is_marked_complete(obj) ? "" : "not"); msg.append(" %3s marked next\n", heap->is_marked_next(obj) ? "" : "not"); msg.append(" %3s in collection set\n", heap->in_collection_set(obj) ? "" : "not"); + if (heap->traversal_gc() != NULL) { + msg.append(" %3s in root set\n", heap->traversal_gc()->root_regions()->is_in((HeapWord*) obj) ? 
"" : "not"); + msg.append(" %3s in traversal set\n", heap->traversal_gc()->traversal_set()->is_in((HeapWord*) obj) ? "" : "not"); + } msg.append(" region: %s", ss.as_string()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp @@ -33,7 +33,7 @@ ShenandoahSATBMarkQueueSet ShenandoahBarrierSet::_satb_mark_queue_set; -template +template class ShenandoahUpdateRefsForOopClosure: public ExtendedOopClosure { private: ShenandoahHeap* _heap; @@ -41,9 +41,8 @@ inline void do_oop_work(T* p) { oop o; if (STOREVAL_WRITE_BARRIER) { - bool evac; - o = _heap->evac_update_with_forwarded(p, evac); - if ((ALWAYS_ENQUEUE || evac) && !CompressedOops::is_null(o)) { + o = _heap->evac_update_with_forwarded(p); + if (!CompressedOops::is_null(o)) { ShenandoahBarrierSet::enqueue(o); } } else { @@ -99,10 +98,10 @@ ShouldNotReachHere(); } -template +template void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) { assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled"); - ShenandoahUpdateRefsForOopClosure cl; + ShenandoahUpdateRefsForOopClosure cl; ShenandoahEvacOOMScope oom_evac_scope; T* dst = (T*) start; for (size_t i = 0; i < count; i++) { @@ -116,31 +115,32 @@ if (!need_update_refs_barrier()) return; if (UseShenandoahMatrix) { - assert(! 
_heap->is_concurrent_traversal_in_progress(), "traversal GC should take another branch"); - if (_heap->is_concurrent_partial_in_progress()) { + if (_heap->is_concurrent_traversal_in_progress()) { if (UseCompressedOops) { - write_ref_array_loop(start, count); + write_ref_array_loop(start, count); } else { - write_ref_array_loop(start, count); + write_ref_array_loop(start, count); } } else { if (UseCompressedOops) { - write_ref_array_loop(start, count); + write_ref_array_loop(start, count); } else { - write_ref_array_loop(start, count); + write_ref_array_loop(start, count); } } - } else if (_heap->is_concurrent_traversal_in_progress()) { - if (UseCompressedOops) { - write_ref_array_loop(start, count); + } else { + if (_heap->is_concurrent_traversal_in_progress()) { + if (UseCompressedOops) { + write_ref_array_loop(start, count); + } else { + write_ref_array_loop(start, count); + } } else { - write_ref_array_loop(start, count); - } - } else { - if (UseCompressedOops) { - write_ref_array_loop(start, count); - } else { - write_ref_array_loop(start, count); + if (UseCompressedOops) { + write_ref_array_loop(start, count); + } else { + write_ref_array_loop(start, count); + } } } } @@ -164,8 +164,7 @@ template void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, int count) { shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_concgc()); - if (ShenandoahSATBBarrier || - (ShenandoahConditionalSATBBarrier && _heap->is_concurrent_mark_in_progress())) { + if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) { T* elem_ptr = dst; for (int i = 0; i < count; i++, elem_ptr++) { T heap_oop = RawAccess<>::oop_load(elem_ptr); @@ -236,21 +235,19 @@ oop obj = oop(mr.start()); assert(oopDesc::is_oop(obj), "must be an oop"); if (UseShenandoahMatrix) { - assert(! 
_heap->is_concurrent_traversal_in_progress(), "traversal GC should take another branch"); - if (_heap->is_concurrent_partial_in_progress()) { - ShenandoahUpdateRefsForOopClosure cl; + if (_heap->is_concurrent_traversal_in_progress()) { + ShenandoahUpdateRefsForOopClosure cl; obj->oop_iterate(&cl); } else { - ShenandoahUpdateRefsForOopClosure cl; + ShenandoahUpdateRefsForOopClosure cl; obj->oop_iterate(&cl); } } else { - assert(! _heap->is_concurrent_partial_in_progress(), "partial GC needs matrix"); if (_heap->is_concurrent_traversal_in_progress()) { - ShenandoahUpdateRefsForOopClosure cl; + ShenandoahUpdateRefsForOopClosure cl; obj->oop_iterate(&cl); } else { - ShenandoahUpdateRefsForOopClosure cl; + ShenandoahUpdateRefsForOopClosure cl; obj->oop_iterate(&cl); } } @@ -290,20 +287,15 @@ IRT_END oop ShenandoahBarrierSet::write_barrier_impl(oop obj) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier), "should be enabled"); + assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled"); if (!CompressedOops::is_null(obj)) { - bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL); + bool evac_in_progress = _heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); oop fwd = resolve_forwarded_not_null(obj); if (evac_in_progress && _heap->in_collection_set(obj) && oopDesc::unsafe_equals(obj, fwd)) { ShenandoahEvacOOMScope oom_evac_scope; - bool evac; - oop copy = _heap->evacuate_object(obj, Thread::current(), evac); - if (evac && _heap->is_concurrent_partial_in_progress()) { - enqueue(copy); - } - return copy; + return _heap->evacuate_object(obj, Thread::current()); } else { return fwd; } @@ -321,8 +313,11 @@ } oop ShenandoahBarrierSet::storeval_barrier(oop obj) { - if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) { - obj = write_barrier(obj); + if (ShenandoahStoreValEnqueueBarrier) { + if 
(!CompressedOops::is_null(obj)) { + obj = write_barrier(obj); + enqueue(obj); + } } if (ShenandoahStoreValEnqueueBarrier && !CompressedOops::is_null(obj)) { enqueue(obj); @@ -334,12 +329,8 @@ } void ShenandoahBarrierSet::keep_alive_barrier(oop obj) { - if (ShenandoahKeepAliveBarrier) { - if (_heap->is_concurrent_mark_in_progress()) { - enqueue(obj); - } else if (_heap->is_concurrent_partial_in_progress()) { - write_barrier_impl(obj); - } + if (ShenandoahKeepAliveBarrier && _heap->is_concurrent_mark_in_progress()) { + enqueue(obj); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp @@ -35,8 +35,7 @@ enum ArrayCopyStoreValMode { NONE, READ_BARRIER, - WRITE_BARRIER_MAYBE_ENQUEUE, - WRITE_BARRIER_ALWAYS_ENQUEUE + WRITE_BARRIER }; static ShenandoahSATBMarkQueueSet _satb_mark_queue_set; @@ -114,7 +113,7 @@ private: bool need_update_refs_barrier(); - template + template void write_ref_array_loop(HeapWord* start, size_t count); oop write_barrier_impl(oop obj); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp @@ -136,10 +136,8 @@ return arraycopy_loop(src, dst, length, bound); case READ_BARRIER: return arraycopy_loop(src, dst, length, bound); - case WRITE_BARRIER_MAYBE_ENQUEUE: - return arraycopy_loop(src, dst, length, bound); - case WRITE_BARRIER_ALWAYS_ENQUEUE: - return arraycopy_loop(src, dst, length, bound); + case WRITE_BARRIER: + return arraycopy_loop(src, dst, length, bound); default: ShouldNotReachHere(); return true; // happy compiler @@ -221,25 +219,12 @@ case READ_BARRIER: obj = 
ShenandoahBarrierSet::resolve_forwarded_not_null(obj); break; - case WRITE_BARRIER_MAYBE_ENQUEUE: + case WRITE_BARRIER: if (_heap->in_collection_set(obj)) { oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); if (oopDesc::unsafe_equals(forw, obj)) { bool evac; - forw = _heap->evacuate_object(forw, thread, evac); - if (evac) { - enqueue(forw); - } - } - obj = forw; - } - break; - case WRITE_BARRIER_ALWAYS_ENQUEUE: - if (_heap->in_collection_set(obj)) { - oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (oopDesc::unsafe_equals(forw, obj)) { - bool evac; - forw = _heap->evacuate_object(forw, thread, evac); + forw = _heap->evacuate_object(forw, thread); } obj = forw; } @@ -290,14 +275,12 @@ dst = ((T*)(void*) dst_obj) + dst_offset; } - bool satb = (ShenandoahSATBBarrier || ShenandoahConditionalSATBBarrier) && heap->is_concurrent_mark_in_progress(); + bool satb = ShenandoahSATBBarrier && heap->is_concurrent_mark_in_progress(); bool checkcast = HasDecorator::value; ArrayCopyStoreValMode storeval_mode; if (heap->has_forwarded_objects()) { - if (heap->is_concurrent_partial_in_progress()) { - storeval_mode = WRITE_BARRIER_MAYBE_ENQUEUE; - } else if (heap->is_concurrent_traversal_in_progress()) { - storeval_mode = WRITE_BARRIER_ALWAYS_ENQUEUE; + if (heap->is_concurrent_traversal_in_progress()) { + storeval_mode = WRITE_BARRIER; } else if (heap->is_concurrent_mark_in_progress() || heap->is_update_refs_in_progress()) { storeval_mode = READ_BARRIER; } else { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -28,7 +28,7 @@ #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" -#include 
"gc/shenandoah/shenandoahPartialGC.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "runtime/os.hpp" #include "utilities/quickSort.hpp" @@ -179,16 +179,8 @@ return _update_refs_early; } - virtual bool should_start_partial_gc() { - return false; - } - - virtual bool can_do_partial_gc() { - return false; - } - - virtual bool should_start_traversal_gc() { - return false; + virtual ShenandoahHeap::GCCycleMode should_start_traversal_gc() { + return ShenandoahHeap::NONE; } virtual bool can_do_traversal_gc() { @@ -408,12 +400,10 @@ // Disable known barriers by default. SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier); - SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahConditionalSATBBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier); - SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValWriteBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier); SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier); @@ -885,21 +875,134 @@ } }; -class ShenandoahPartialHeuristics : public ShenandoahAdaptiveHeuristics { +class ShenandoahTraversalHeuristics : public ShenandoahHeuristics { +protected: + +public: + ShenandoahTraversalHeuristics() : ShenandoahHeuristics() { + FLAG_SET_DEFAULT(UseShenandoahMatrix, false); + FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false); + FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false); + FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true); + FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false); + FLAG_SET_DEFAULT(ShenandoahBarriersForConst, true); + FLAG_SET_DEFAULT(ShenandoahWriteBarrierRB, false); + FLAG_SET_DEFAULT(ShenandoahAllocImplicitLive, false); + FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false); + FLAG_SET_DEFAULT(ShenandoahRecycleClearsBitmap, true); + + 
SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1); + SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1); + + } + + virtual bool should_start_normal_gc() const { + return false; + } + + virtual bool is_experimental() { + return true; + } + + virtual bool is_diagnostic() { + return false; + } + + virtual bool can_do_traversal_gc() { + return true; + } + + virtual const char* name() { + return "traversal"; + } + + virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + // No root regions in this mode. + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); + ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions(); + root_regions->clear(); + + ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set(); + traversal_set->clear(); + for (size_t i = 0; i < heap->num_regions(); i++) { + ShenandoahHeapRegion* r = heap->get_region(i); + assert(!collection_set->is_in(r), "must not yet be in cset"); + if (r->is_regular() && r->used() > 0) { + size_t garbage_percent = r->garbage() * 100 / ShenandoahHeapRegion::region_size_bytes(); + if (garbage_percent > ShenandoahGarbageThreshold) { + collection_set->add_region(r); + } + } + r->clear_live_data(); + traversal_set->add_region(r); + } + collection_set->update_region_status(); + } + + virtual ShenandoahHeap::GCCycleMode should_start_traversal_gc() { + + ShenandoahHeap* heap = ShenandoahHeap::heap(); + + if (heap->has_forwarded_objects()) return ShenandoahHeap::NONE; + + double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; + bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval); + if (periodic_gc) { + log_info(gc,ergo)("Periodic GC triggered. 
Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms", + last_time_ms, ShenandoahGuaranteedGCInterval); + return ShenandoahHeap::MAJOR; + } + + size_t capacity = heap->capacity(); + size_t used = heap->used(); + return 100 - (used * 100 / capacity) < ShenandoahFreeThreshold ? ShenandoahHeap::MAJOR : ShenandoahHeap::NONE; + } + +protected: + virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, + RegionData* data, size_t data_size, + size_t free) { + ShouldNotReachHere(); + } +}; + +class ShenandoahPartialHeuristics : public ShenandoahTraversalHeuristics { protected: size_t* _from_idxs; + bool is_minor_gc() const { return ShenandoahHeap::heap()->is_minor_gc(); } + + // Utility method to remove any cset regions from root set and + // add all cset regions to the traversal set. + void filter_regions() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); + size_t num_regions = heap->num_regions(); + ShenandoahCollectionSet* collection_set = heap->collection_set(); + ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions(); + ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set(); + traversal_set->clear(); + + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* region = heap->get_region(i); + if (collection_set->is_in(i)) { + if (root_regions->is_in(i)) { + root_regions->remove_region(region); + } + traversal_set->add_region_check_for_duplicates(region); + assert(traversal_set->is_in(i), "must be in traversal set now"); + } + } + } + public: - ShenandoahPartialHeuristics() : ShenandoahAdaptiveHeuristics() { + ShenandoahPartialHeuristics() : + ShenandoahTraversalHeuristics() { + FLAG_SET_DEFAULT(UseShenandoahMatrix, true); - // Set up special barriers for concurrent partial GC. 
- FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, true); - FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false); - FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier, true); - FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false); - - SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1); // TODO: Disable this optimization for now, as it also requires the matrix barriers. #ifdef COMPILER2 FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0); @@ -915,15 +1018,19 @@ } bool should_start_update_refs() { - return true; + return false; } bool update_refs() const { - return true; + return false; } - bool can_do_partial_gc() { - return true; + virtual bool should_unload_classes() { + return ShenandoahUnloadClassesFrequency != 0; + } + + virtual bool should_process_references() { + return ShenandoahRefProcFrequency != 0; } bool should_start_normal_gc() const { @@ -938,9 +1045,6 @@ return true; } - virtual bool should_start_partial_gc() = 0; - virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) = 0; - }; class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics { @@ -949,12 +1053,17 @@ return "connectedness"; } - bool should_start_partial_gc() { + ShenandoahHeap::GCCycleMode should_start_traversal_gc() { + ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc(); + if (cycle_mode != ShenandoahHeap::NONE) { + return cycle_mode; + } + ShenandoahHeap* heap = ShenandoahHeap::heap(); if (heap->has_forwarded_objects()) { // Cannot start partial if heap is not completely updated. - return false; + return ShenandoahHeap::NONE; } size_t capacity = heap->capacity(); @@ -963,7 +1072,7 @@ if (used < prev_used) { // Major collection must have happened, "used" data is unreliable, wait for update. 
- return false; + return ShenandoahHeap::NONE; } size_t threshold = heap->capacity() * ShenandoahConnectednessPercentage / 100; @@ -980,12 +1089,19 @@ } else { log_trace(gc,ergo)("%s", msg.buffer()); } - return result; + return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE; } void choose_collection_set(ShenandoahCollectionSet* collection_set) { + if (!is_minor_gc()) { + return ShenandoahPartialHeuristics::choose_collection_set(collection_set); + } + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); ShenandoahConnectionMatrix* matrix = heap->connection_matrix(); + ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions(); + root_regions->clear(); size_t num_regions = heap->num_regions(); RegionConnections* connects = get_region_connects_cache(num_regions); @@ -993,7 +1109,6 @@ for (uint to_idx = 0; to_idx < num_regions; to_idx++) { ShenandoahHeapRegion* region = heap->get_region(to_idx); - region->set_root(false); if (!region->is_regular()) continue; uint count = matrix->count_connected_to(to_idx, num_regions); @@ -1029,13 +1144,11 @@ maybe_add_heap_region(region, collection_set); for (size_t i = 0; i < from_idx_count; i++) { ShenandoahHeapRegion* r = heap->get_region(_from_idxs[i]); - if (!r->is_root()) { - r->set_root(true); - } + root_regions->add_region_check_for_duplicates(r); } } } - + filter_regions(); collection_set->update_region_status(); } }; @@ -1052,7 +1165,12 @@ } virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) { + if (!is_minor_gc()) { + return ShenandoahPartialHeuristics::choose_collection_set(collection_set); + } + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); ShenandoahConnectionMatrix* matrix = heap->connection_matrix(); uint64_t alloc_seq_at_last_gc_end = heap->alloc_seq_at_last_gc_end(); uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start(); @@ -1076,10 +1194,8 
@@ guarantee(used >= prev_used, "Invariant"); size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions); - for (uint to_idx = 0; to_idx < num_regions; to_idx++) { - ShenandoahHeapRegion* region = heap->get_region(to_idx); - region->set_root(false); - } + ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions(); + root_regions->clear(); uint count = 0; @@ -1099,24 +1215,28 @@ for (uint f = 0; f < from_idx_count; f++) { ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]); - if (!r->is_root()) { - r->set_root(true); - } + root_regions->add_region_check_for_duplicates(r); } } } + filter_regions(); collection_set->update_region_status(); log_info(gc,ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT, num_regions, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count()); } - bool should_start_partial_gc() { + ShenandoahHeap::GCCycleMode should_start_traversal_gc() { + ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc(); + if (cycle_mode != ShenandoahHeap::NONE) { + return cycle_mode; + } + ShenandoahHeap* heap = ShenandoahHeap::heap(); if (heap->has_forwarded_objects()) { // Cannot start partial if heap is not completely updated. - return false; + return ShenandoahHeap::NONE; } size_t capacity = heap->capacity(); @@ -1125,7 +1245,7 @@ if (used < prev_used) { // Major collection must have happened, "used" data is unreliable, wait for update. - return false; + return ShenandoahHeap::NONE; } size_t threshold = heap->capacity() * ShenandoahGenerationalYoungGenPercentage / 100; @@ -1144,7 +1264,7 @@ } else { log_trace(gc,ergo)("%s", msg.buffer()); } - return result; + return result ? 
ShenandoahHeap::MINOR : ShenandoahHeap::NONE; } }; @@ -1159,7 +1279,12 @@ } virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) { + if (!is_minor_gc()) { + return ShenandoahPartialHeuristics::choose_collection_set(collection_set); + } + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); ShenandoahConnectionMatrix* matrix = heap->connection_matrix(); uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start(); @@ -1187,10 +1312,9 @@ guarantee(used >= prev_used, "Invariant"); size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_count); - for (uint to_idx = 0; to_idx < num_regions; to_idx++) { - ShenandoahHeapRegion* region = heap->get_region(to_idx); - region->set_root(false); - } + ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions(); + root_regions->clear(); + uint count = 0; for (uint i = 0; (i < sorted_count) && (count < target); i++) { @@ -1208,24 +1332,28 @@ } for (uint f = 0; f < from_idx_count; f++) { ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]); - if (!r->is_root()) { - r->set_root(true); - } + root_regions->add_region_check_for_duplicates(r); } } } + filter_regions(); collection_set->update_region_status(); log_info(gc,ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT, num_regions, target, ShenandoahLRUOldGenPercentage, collection_set->count()); } - bool should_start_partial_gc() { + ShenandoahHeap::GCCycleMode should_start_traversal_gc() { + ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc(); + if (cycle_mode != ShenandoahHeap::NONE) { + return cycle_mode; + } + ShenandoahHeap* heap = ShenandoahHeap::heap(); if (heap->has_forwarded_objects()) { // Cannot start partial if heap is not completely updated. 
- return false; + return ShenandoahHeap::NONE; } size_t capacity = heap->capacity(); @@ -1234,7 +1362,7 @@ if (used < prev_used) { // Major collection must have happened, "used" data is unreliable, wait for update. - return false; + return ShenandoahHeap::NONE; } // For now don't start until we are 40% full @@ -1254,97 +1382,14 @@ } else { log_trace(gc,ergo)("%s", msg.buffer()); } - return result; + return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE; } }; -class ShenandoahTraversalHeuristics : public ShenandoahHeuristics { -public: - ShenandoahTraversalHeuristics() : ShenandoahHeuristics() { - FLAG_SET_DEFAULT(UseShenandoahMatrix, false); - FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false); - FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, false); - FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false); - FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier, false); - FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true); - FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false); - FLAG_SET_DEFAULT(ShenandoahBarriersForConst, true); - FLAG_SET_DEFAULT(ShenandoahWriteBarrierRB, false); - FLAG_SET_DEFAULT(ShenandoahAllocImplicitLive, false); - FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false); - } - - virtual bool should_start_normal_gc() const { - return false; - } - - virtual bool is_experimental() { - return true; - } - - virtual bool is_diagnostic() { - return false; - } - - virtual bool can_do_traversal_gc() { - return true; - } - - virtual const char* name() { - return "traversal"; - } - - virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - for (size_t i = 0; i < heap->num_regions(); i++) { - ShenandoahHeapRegion* r = heap->get_region(i); - assert(!r->is_root(), "must not be root region"); - assert(!collection_set->is_in(r), "must not yet be in cset"); - if (r->is_regular() && r->used() > 0) { - size_t garbage_percent = r->garbage() * 100 / 
ShenandoahHeapRegion::region_size_bytes(); - if (garbage_percent > ShenandoahGarbageThreshold) { - collection_set->add_region(r); - } - } - heap->set_next_top_at_mark_start(r->bottom(), r->top()); - heap->set_complete_top_at_mark_start(r->bottom(), r->top()); // For debugging purposes - r->clear_live_data(); - } - collection_set->update_region_status(); - } - - virtual bool should_start_traversal_gc() { - - ShenandoahHeap* heap = ShenandoahHeap::heap(); - - if (heap->has_forwarded_objects()) return false; - - double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; - bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval); - if (periodic_gc) { - log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms", - last_time_ms, ShenandoahGuaranteedGCInterval); - return true; - } - - size_t capacity = heap->capacity(); - size_t used = heap->used(); - return 100 - (used * 100 / capacity) < ShenandoahFreeThreshold; - } - - virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, - RegionData* data, size_t data_size, - size_t free) { - ShouldNotReachHere(); - } -}; - - ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() : _cycle_counter(0), _success_concurrent_gcs(0), - _success_partial_gcs(0), _success_degenerated_gcs(0), _success_full_gcs(0), _explicit_concurrent(0), @@ -1362,7 +1407,6 @@ _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer(); if (ShenandoahGCHeuristics != NULL) { - _minor_heuristics = NULL; if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { _heuristics = new ShenandoahAggressiveHeuristics(); } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { @@ -1374,14 +1418,11 @@ } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { _heuristics = new ShenandoahCompactHeuristics(); } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) { - _heuristics = new ShenandoahAdaptiveHeuristics(); - _minor_heuristics = new 
ShenandoahPartialConnectedHeuristics(); + _heuristics = new ShenandoahPartialConnectedHeuristics(); } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) { - _heuristics = new ShenandoahAdaptiveHeuristics(); - _minor_heuristics = new ShenandoahGenerationalPartialHeuristics(); + _heuristics = new ShenandoahGenerationalPartialHeuristics(); } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) { - _heuristics = new ShenandoahAdaptiveHeuristics(); - _minor_heuristics = new ShenandoahLRUPartialHeuristics(); + _heuristics = new ShenandoahLRUPartialHeuristics(); } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) { _heuristics = new ShenandoahTraversalHeuristics(); } else { @@ -1398,36 +1439,12 @@ err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", _heuristics->name())); } - if (_minor_heuristics != NULL && _minor_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { - vm_exit_during_initialization( - err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", - _minor_heuristics->name())); - } - if (_minor_heuristics != NULL && _minor_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { - vm_exit_during_initialization( - err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", - _minor_heuristics->name())); - } - if (ShenandoahConditionalSATBBarrier && ShenandoahSATBBarrier) { - vm_exit_during_initialization("Cannot use both ShenandoahSATBBarrier and ShenandoahConditionalSATBBarrier"); - } - if (ShenandoahStoreValWriteBarrier && ShenandoahStoreValReadBarrier) { - vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValReadBarrier"); - } if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) { vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier"); } - if 
(ShenandoahStoreValWriteBarrier && ShenandoahStoreValEnqueueBarrier) { - vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValEnqueueBarrier"); - } - if (_minor_heuristics != NULL) { - log_info(gc, init)("Shenandoah heuristics: %s minor with %s major", - _minor_heuristics->name(), _heuristics->name()); - } else { - log_info(gc, init)("Shenandoah heuristics: %s", - _heuristics->name()); - } + log_info(gc, init)("Shenandoah heuristics: %s", + _heuristics->name()); _heuristics->print_thresholds(); } else { ShouldNotReachHere(); @@ -1463,9 +1480,6 @@ void ShenandoahCollectorPolicy::post_heap_initialize() { _heuristics->initialize(); - if (_minor_heuristics != NULL) { - _minor_heuristics->initialize(); - } } void ShenandoahCollectorPolicy::record_explicit_to_concurrent() { @@ -1499,10 +1513,6 @@ _success_concurrent_gcs++; } -void ShenandoahCollectorPolicy::record_success_partial() { - _success_partial_gcs++; -} - void ShenandoahCollectorPolicy::record_success_degenerated() { _heuristics->record_success_degenerated(); _success_degenerated_gcs++; @@ -1522,16 +1532,10 @@ } bool ShenandoahCollectorPolicy::update_refs() { - if (_minor_heuristics != NULL && _minor_heuristics->update_refs()) { - return true; - } return _heuristics->update_refs(); } bool ShenandoahCollectorPolicy::should_start_update_refs() { - if (_minor_heuristics != NULL && _minor_heuristics->should_start_update_refs()) { - return true; - } return _heuristics->should_start_update_refs(); } @@ -1541,10 +1545,7 @@ void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, bool minor) { - if (minor) - _minor_heuristics->choose_collection_set(collection_set); - else - _heuristics->choose_collection_set(collection_set); + _heuristics->choose_collection_set(collection_set); } bool ShenandoahCollectorPolicy::should_process_references() { @@ -1563,23 +1564,7 @@ _heuristics->record_phase_time(phase, secs); } -bool 
ShenandoahCollectorPolicy::should_start_partial_gc() { - if (_minor_heuristics != NULL) { - return _minor_heuristics->should_start_partial_gc(); - } else { - return false; // no minor heuristics -> no partial gc - } -} - -bool ShenandoahCollectorPolicy::can_do_partial_gc() { - if (_minor_heuristics != NULL) { - return _minor_heuristics->can_do_partial_gc(); - } else { - return false; // no minor heuristics -> no partial gc - } -} - -bool ShenandoahCollectorPolicy::should_start_traversal_gc() { +ShenandoahHeap::GCCycleMode ShenandoahCollectorPolicy::should_start_traversal_gc() { return _heuristics->should_start_traversal_gc(); } @@ -1611,9 +1596,6 @@ out->print_cr("to avoid Degenerated and Full GC cycles."); out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " successful partial concurrent GCs", _success_partial_gcs); - out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs", _success_concurrent_gcs); out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_concurrent); out->cr(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp @@ -41,7 +41,6 @@ class ShenandoahCollectorPolicy: public CollectorPolicy { private: - size_t _success_partial_gcs; size_t _success_concurrent_gcs; size_t _success_degenerated_gcs; size_t _success_full_gcs; @@ -55,7 +54,6 @@ ShenandoahSharedFlag _in_shutdown; ShenandoahHeuristics* _heuristics; - ShenandoahHeuristics* _minor_heuristics; ShenandoahTracer* _tracer; size_t _cycle_counter; @@ -88,7 +86,6 @@ void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs); - void record_success_partial(); void record_success_concurrent(); void record_success_degenerated(); void record_success_full(); @@ -99,9 +96,7 @@ void record_explicit_to_full(); bool should_start_normal_gc(); - bool 
should_start_partial_gc(); - bool can_do_partial_gc(); - bool should_start_traversal_gc(); + ShenandoahHeap::GCCycleMode should_start_traversal_gc(); bool can_do_traversal_gc(); // Returns true when there should be a separate concurrent reference diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp @@ -310,7 +310,6 @@ update_code_cache = ShenandoahConcurrentEvacCodeRoots; break; case ShenandoahPhaseTimings::full_gc_roots: - case ShenandoahPhaseTimings::final_partial_gc_work: update_code_cache = true; break; default: @@ -752,8 +751,6 @@ void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) { ShenandoahHeap* sh = ShenandoahHeap::heap(); - assert(!sh->is_concurrent_partial_in_progress(), "cannot process weakrefs during conc-partial yet"); - ReferenceProcessor* rp = sh->ref_processor(); ShenandoahPhaseTimings::Phase phase_process = diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.inline.hpp @@ -42,6 +42,7 @@ shenandoah_assert_marked_next(NULL, obj); shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_concgc()); + cl->set_base_object(obj); if (task->is_not_chunked()) { if (COUNT_LIVENESS) count_liveness(live_data, obj); if (obj->is_instance()) { @@ -62,6 +63,7 @@ // Case 4: Array chunk, has sensible chunk id. Process it. 
do_chunked_array(q, cl, obj, task->chunk(), task->pow()); } + cl->set_base_object(NULL); } inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.cpp @@ -30,7 +30,6 @@ #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" -#include "gc/shenandoah/shenandoahPartialGC.hpp" #include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" @@ -122,15 +121,15 @@ cause = _explicit_gc_cause; } else { // Potential normal cycle: ask heuristics if it wants to act - if (policy->should_start_partial_gc()) { - mode = concurrent_partial; - cause = GCCause::_shenandoah_partial_gc; - } else if (policy->should_start_traversal_gc()) { + ShenandoahHeap::GCCycleMode traversal_mode = policy->should_start_traversal_gc(); + if (traversal_mode != ShenandoahHeap::NONE) { mode = concurrent_traversal; cause = GCCause::_shenandoah_traversal_gc; + heap->set_cycle_mode(traversal_mode); } else if (policy->should_start_normal_gc()) { mode = concurrent_normal; cause = GCCause::_shenandoah_concurrent_gc; + heap->set_cycle_mode(ShenandoahHeap::MAJOR); } // Ask policy if this cycle wants to process references or unload classes @@ -158,9 +157,6 @@ switch (mode) { case none: break; - case concurrent_partial: - service_concurrent_partial_cycle(cause); - break; case concurrent_traversal: service_concurrent_traversal_cycle(cause); break; @@ -177,6 +173,8 @@ ShouldNotReachHere(); } + heap->set_cycle_mode(ShenandoahHeap::NONE); + if (gc_requested) { heap->set_used_at_last_gc(); @@ -242,51 +240,25 @@ } } -void 
ShenandoahConcurrentThread::service_concurrent_partial_cycle(GCCause::Cause cause) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahPartialGC* partial_gc = heap->partial_gc(); - - if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return; - - GCIdMark gc_id_mark; - ShenandoahGCSession session; - - TraceCollectorStats tcs(heap->monitoring_support()->partial_collection_counters()); - - heap->vmop_entry_init_partial(); - if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return; - - if (!partial_gc->has_work()) return; - - heap->entry_partial(); - if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return; - - heap->vmop_entry_final_partial(); - if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_partial)) return; - - heap->entry_cleanup(); - - heap->shenandoahPolicy()->record_success_partial(); -} - void ShenandoahConcurrentThread::service_concurrent_traversal_cycle(GCCause::Cause cause) { GCIdMark gc_id_mark; ShenandoahGCSession session; ShenandoahHeap* heap = ShenandoahHeap::heap(); - TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); + bool is_minor = heap->is_minor_gc(); + TraceCollectorStats tcs(is_minor ? 
heap->monitoring_support()->partial_collection_counters() + : heap->monitoring_support()->concurrent_collection_counters()); heap->vmop_entry_init_traversal(); if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return; heap->entry_traversal(); - if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_traversal)) return; heap->vmop_entry_final_traversal(); - heap->entry_cleanup_bitmaps(); + heap->entry_cleanup_traversal(); heap->shenandoahPolicy()->record_success_concurrent(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentThread.hpp @@ -49,7 +49,6 @@ private: typedef enum { none, - concurrent_partial, concurrent_traversal, concurrent_normal, stw_degenerated, @@ -82,7 +81,6 @@ void service_concurrent_normal_cycle(GCCause::Cause cause); void service_stw_full_cycle(GCCause::Cause cause); void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point); - void service_concurrent_partial_cycle(GCCause::Cause cause); void service_concurrent_traversal_cycle(GCCause::Cause cause); bool try_set_alloc_failure_gc(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -25,6 +25,8 @@ #include "logging/logStream.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), @@ -183,7 +185,8 @@ // We're updating TAMS for evacuation-allocs, such that we will not // treat 
evacuated objects as implicitely live and traverse through them. // See top of shenandoahTraversal.cpp for an explanation. - _heap->set_next_top_at_mark_start(r->bottom(), r->end()); + _heap->set_next_top_at_mark_start(r->bottom(), r->top()); + _heap->traversal_gc()->traversal_set()->add_region_check_for_duplicates(r); OrderAccess::fence(); } break; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -45,7 +45,6 @@ #include "gc/shenandoah/shenandoahMemoryPool.hpp" #include "gc/shenandoah/shenandoahMonitoringSupport.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" -#include "gc/shenandoah/shenandoahPartialGC.hpp" #include "gc/shenandoah/shenandoahPacer.hpp" #include "gc/shenandoah/shenandoahPacer.inline.hpp" #include "gc/shenandoah/shenandoahRootProcessor.hpp" @@ -202,6 +201,119 @@ assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0, "misaligned heap: "PTR_FORMAT, p2i(base())); + // The call below uses stuff (the SATB* things) that are in G1, but probably + // belong into a shared location. + ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, + SATB_Q_FL_lock, + 20 /*G1SATBProcessCompletedThreshold */, + Shared_SATB_Q_lock); + + // Reserve space for prev and next bitmap. + size_t bitmap_page_size = UseLargePages ? 
(size_t)os::large_page_size() : (size_t)os::vm_page_size(); + _bitmap_size = MarkBitMap::compute_size(heap_rs.size()); + _bitmap_size = align_up(_bitmap_size, bitmap_page_size); + _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize); + + size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); + + guarantee(bitmap_bytes_per_region != 0, + "Bitmap bytes per region should not be zero"); + guarantee(is_power_of_2(bitmap_bytes_per_region), + "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region); + + if (bitmap_page_size > bitmap_bytes_per_region) { + _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; + _bitmap_bytes_per_slice = bitmap_page_size; + } else { + _bitmap_regions_per_slice = 1; + _bitmap_bytes_per_slice = bitmap_bytes_per_region; + } + + guarantee(_bitmap_regions_per_slice >= 1, + "Should have at least one region per slice: " SIZE_FORMAT, + _bitmap_regions_per_slice); + + guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, + "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, + _bitmap_bytes_per_slice, bitmap_page_size); + + ReservedSpace bitmap0(_bitmap_size, bitmap_page_size); + MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC); + _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize); + + ReservedSpace bitmap1(_bitmap_size, bitmap_page_size); + MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC); + _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize); + + size_t bitmap_init_commit = _bitmap_bytes_per_slice * + align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice; + bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit); + os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false, + "couldn't allocate initial bitmap"); + 
os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false, + "couldn't allocate initial bitmap"); + + size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); + + if (ShenandoahVerify) { + ReservedSpace verify_bitmap(_bitmap_size, page_size); + os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false, + "couldn't allocate verification bitmap"); + MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC); + MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize); + _verification_bit_map.initialize(_heap_region, verify_bitmap_region); + _verifier = new ShenandoahVerifier(this, &_verification_bit_map); + } + + if (ShenandoahAlwaysPreTouch) { + assert (!AlwaysPreTouch, "Should have been overridden"); + + // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, + // before initialize() below zeroes it with initializing thread. For any given region, + // we touch the region and the corresponding bitmaps from the same thread. + + log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages", + _num_regions, page_size); + ShenandoahPretouchTask cl(region_iterator(), bitmap0.base(), bitmap1.base(), _bitmap_size, page_size); + _workers->run_task(&cl); + } + + _mark_bit_map0.initialize(_heap_region, _bitmap0_region); + _complete_mark_bit_map = &_mark_bit_map0; + + _mark_bit_map1.initialize(_heap_region, _bitmap1_region); + _next_mark_bit_map = &_mark_bit_map1; + + // Reserve aux bitmap for use in object_iterate(). We don't commit it here. 
+ ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); + MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); + _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); + _aux_bit_map.initialize(_heap_region, _aux_bitmap_region); + + if (UseShenandoahMatrix) { + _connection_matrix = new ShenandoahConnectionMatrix(_num_regions); + } else { + _connection_matrix = NULL; + } + + _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ? + new ShenandoahTraversalGC(this, _num_regions) : + NULL; + + _monitoring_support = new ShenandoahMonitoringSupport(this); + + _phase_timings = new ShenandoahPhaseTimings(); + + if (ShenandoahAllocationTrace) { + _alloc_tracker = new ShenandoahAllocTracker(); + } + + ShenandoahStringDedup::initialize(); + + _concurrent_gc_thread = new ShenandoahConcurrentThread(); + + ShenandoahCodeRoots::initialize(); + LogTarget(Trace, gc, region) lt; if (lt.is_enabled()) { ResourceMark rm; @@ -212,123 +324,6 @@ _free_set->print_on(&ls); } - // The call below uses stuff (the SATB* things) that are in G1, but probably - // belong into a shared location. - ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, - SATB_Q_FL_lock, - 20 /*G1SATBProcessCompletedThreshold */, - Shared_SATB_Q_lock); - - // Reserve space for prev and next bitmap. - size_t bitmap_page_size = UseLargePages ? 
(size_t)os::large_page_size() : (size_t)os::vm_page_size(); - _bitmap_size = MarkBitMap::compute_size(heap_rs.size()); - _bitmap_size = align_up(_bitmap_size, bitmap_page_size); - _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize); - - size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); - - guarantee(bitmap_bytes_per_region != 0, - "Bitmap bytes per region should not be zero"); - guarantee(is_power_of_2(bitmap_bytes_per_region), - "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region); - - if (bitmap_page_size > bitmap_bytes_per_region) { - _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; - _bitmap_bytes_per_slice = bitmap_page_size; - } else { - _bitmap_regions_per_slice = 1; - _bitmap_bytes_per_slice = bitmap_bytes_per_region; - } - - guarantee(_bitmap_regions_per_slice >= 1, - "Should have at least one region per slice: " SIZE_FORMAT, - _bitmap_regions_per_slice); - - guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, - "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, - _bitmap_bytes_per_slice, bitmap_page_size); - - ReservedSpace bitmap0(_bitmap_size, bitmap_page_size); - MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC); - _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize); - - ReservedSpace bitmap1(_bitmap_size, bitmap_page_size); - MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC); - _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize); - - size_t bitmap_init_commit = _bitmap_bytes_per_slice * - align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice; - bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit); - os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false, - "couldn't allocate initial bitmap"); - 
os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false, - "couldn't allocate initial bitmap"); - - size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); - - if (ShenandoahVerify) { - ReservedSpace verify_bitmap(_bitmap_size, page_size); - os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false, - "couldn't allocate verification bitmap"); - MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC); - MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize); - _verification_bit_map.initialize(_heap_region, verify_bitmap_region); - _verifier = new ShenandoahVerifier(this, &_verification_bit_map); - } - - if (ShenandoahAlwaysPreTouch) { - assert (!AlwaysPreTouch, "Should have been overridden"); - - // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, - // before initialize() below zeroes it with initializing thread. For any given region, - // we touch the region and the corresponding bitmaps from the same thread. - - log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages", - _num_regions, page_size); - ShenandoahPretouchTask cl(region_iterator(), bitmap0.base(), bitmap1.base(), _bitmap_size, page_size); - _workers->run_task(&cl); - } - - _mark_bit_map0.initialize(_heap_region, _bitmap0_region); - _complete_mark_bit_map = &_mark_bit_map0; - - _mark_bit_map1.initialize(_heap_region, _bitmap1_region); - _next_mark_bit_map = &_mark_bit_map1; - - // Reserve aux bitmap for use in object_iterate(). We don't commit it here. 
- ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); - MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); - _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); - _aux_bit_map.initialize(_heap_region, _aux_bitmap_region); - - if (UseShenandoahMatrix) { - _connection_matrix = new ShenandoahConnectionMatrix(_num_regions); - } else { - _connection_matrix = NULL; - } - - _partial_gc = _shenandoah_policy->can_do_partial_gc() ? - new ShenandoahPartialGC(this, _num_regions) : - NULL; - - _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ? - new ShenandoahTraversalGC(this, _num_regions) : - NULL; - - _monitoring_support = new ShenandoahMonitoringSupport(this); - - _phase_timings = new ShenandoahPhaseTimings(); - - if (ShenandoahAllocationTrace) { - _alloc_tracker = new ShenandoahAllocTracker(); - } - - ShenandoahStringDedup::initialize(); - - _concurrent_gc_thread = new ShenandoahConcurrentThread(); - - ShenandoahCodeRoots::initialize(); - log_info(gc, init)("Safepointing mechanism: %s", SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" : (SafepointMechanism::uses_global_page_poll() ? 
"global-page poll" : "unknown")); @@ -361,6 +356,7 @@ _alloc_seq_at_last_gc_start(0), _alloc_seq_at_last_gc_end(0), _safepoint_workers(NULL), + _gc_cycle_mode(), #ifdef ASSERT _heap_expansion_count(0), #endif @@ -430,6 +426,51 @@ _workers->run_task(&task); } +class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask { +private: + ShenandoahRegionIterator _regions; + +public: + ShenandoahResetNextBitmapTraversalTask(ShenandoahRegionIterator regions) : + AbstractGangTask("Parallel Reset Bitmap Task for Traversal"), + _regions(regions) { + } + + void work(uint worker_id) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set(); + ShenandoahHeapRegion* region = _regions.next(); + while (region != NULL) { + if (heap->is_bitmap_slice_committed(region)) { + if (traversal_set->is_in(region) && !region->is_trash()) { + ShenandoahHeapLocker locker(heap->lock()); + HeapWord* bottom = region->bottom(); + HeapWord* top = heap->next_top_at_mark_start(bottom); + assert(top <= region->top(), + "TAMS must smaller/equals than top: TAMS: "PTR_FORMAT", top: "PTR_FORMAT, + p2i(top), p2i(region->top())); + if (top > bottom) { + heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top)); + heap->set_complete_top_at_mark_start(bottom, top); + heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top)); + heap->set_next_top_at_mark_start(bottom, bottom); + } + } + assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()), + "need clear next bitmap"); + } + region = _regions.next(); + } + } +}; + +void ShenandoahHeap::reset_next_mark_bitmap_traversal() { + assert_gc_workers(_workers->active_workers()); + + ShenandoahResetNextBitmapTraversalTask task(region_iterator()); + _workers->run_task(&task); +} + bool ShenandoahHeap::is_next_bitmap_clear() { for (size_t idx = 0; idx < _num_regions; idx++) { ShenandoahHeapRegion* r = get_region(idx); @@ -460,7 
+501,6 @@ if (is_concurrent_mark_in_progress()) st->print("marking, "); if (is_evacuation_in_progress()) st->print("evacuating, "); if (is_update_refs_in_progress()) st->print("updating refs, "); - if (is_concurrent_partial_in_progress()) st->print("partial, "); if (is_concurrent_traversal_in_progress()) st->print("traversal, "); if (is_degenerated_gc_in_progress()) st->print("degenerated gc, "); if (is_full_gc_in_progress()) st->print("full gc, "); @@ -800,8 +840,7 @@ shenandoah_assert_marked_complete(p, obj); oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); if (oopDesc::unsafe_equals(resolved, obj)) { - bool evac; - resolved = _heap->evacuate_object(obj, _thread, evac); + resolved = _heap->evacuate_object(obj, _thread); } RawAccess::oop_store(p, resolved); } @@ -835,8 +874,7 @@ if (_heap->in_collection_set(obj)) { oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); if (oopDesc::unsafe_equals(resolved, obj)) { - bool evac; - _heap->evacuate_object(obj, _thread, evac); + _heap->evacuate_object(obj, _thread); } } } @@ -862,8 +900,7 @@ void do_object(oop p) { shenandoah_assert_marked_complete(NULL, p); if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) { - bool evac; - _heap->evacuate_object(p, _thread, evac); + _heap->evacuate_object(p, _thread); } } }; @@ -1613,6 +1650,19 @@ shenandoahPolicy()->record_peak_occupancy(); } +void ShenandoahHeap::op_cleanup_traversal() { + + { + ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps); + reset_next_mark_bitmap_traversal(); + } + + op_cleanup(); + + // Allocations happen during bitmap cleanup, record peak after the phase: + shenandoahPolicy()->record_peak_occupancy(); +} + void ShenandoahHeap::op_preclean() { concurrentMark()->preclean_weak_refs(); @@ -1620,18 +1670,6 @@ shenandoahPolicy()->record_peak_occupancy(); } -void ShenandoahHeap::op_init_partial() { - partial_gc()->init_partial_collection(); -} - -void 
ShenandoahHeap::op_partial() { - partial_gc()->concurrent_partial_collection(); -} - -void ShenandoahHeap::op_final_partial() { - partial_gc()->final_partial_collection(); -} - void ShenandoahHeap::op_init_traversal() { traversal_gc()->init_traversal_collection(); } @@ -1658,7 +1696,6 @@ size_t used_before = used(); switch (point) { - case _degenerated_partial: case _degenerated_evac: // Not possible to degenerate from here, upgrade to Full GC right away. cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc); @@ -1680,7 +1717,7 @@ collection_set()->clear(); } op_final_traversal(); - op_cleanup_bitmaps(); + op_cleanup_traversal(); return; case _degenerated_outside_cycle: @@ -1820,12 +1857,6 @@ ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); } -void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) { - - set_gc_state_mask(PARTIAL | HAS_FORWARDED, in_progress); - ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); -} - void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) { set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress); ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); @@ -2197,10 +2228,6 @@ return _connection_matrix; } -ShenandoahPartialGC* ShenandoahHeap::partial_gc() { - return _partial_gc; -} - ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() { return _traversal_gc; } @@ -2513,26 +2540,6 @@ VMThread::execute(&op); } -void ShenandoahHeap::vmop_entry_init_partial() { - TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); - ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); - ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_partial_gc_gross); - - try_inject_alloc_failure(); - VM_ShenandoahInitPartialGC op; - VMThread::execute(&op); -} - -void ShenandoahHeap::vmop_entry_final_partial() { - TraceCollectorStats 
tcs(monitoring_support()->stw_collection_counters()); - ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); - ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_partial_gc_gross); - - try_inject_alloc_failure(); - VM_ShenandoahFinalPartialGC op; - VMThread::execute(&op); -} - void ShenandoahHeap::vmop_entry_init_traversal() { TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross); @@ -2641,32 +2648,6 @@ op_final_updaterefs(); } -void ShenandoahHeap::entry_init_partial() { - ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); - ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_partial_gc); - - static const char* msg = "Pause Init Partial"; - GCTraceTime(Info, gc) time(msg, gc_timer()); - EventMark em("%s", msg); - - ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_partial()); - - op_init_partial(); -} - -void ShenandoahHeap::entry_final_partial() { - ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); - ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_partial_gc); - - static const char* msg = "Pause Final Partial"; - GCTraceTime(Info, gc) time(msg, gc_timer()); - EventMark em("%s", msg); - - ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_partial()); - - op_final_partial(); -} - void ShenandoahHeap::entry_init_traversal() { ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause); ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc); @@ -2777,6 +2758,19 @@ op_cleanup(); } +void ShenandoahHeap::entry_cleanup_traversal() { + ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup); + + static const char* msg = "Concurrent cleanup"; + GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true); + EventMark em("%s", msg); + + ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal()); 
+ + try_inject_alloc_failure(); + op_cleanup_traversal(); +} + void ShenandoahHeap::entry_cleanup_bitmaps() { ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup); @@ -2805,25 +2799,13 @@ } } -void ShenandoahHeap::entry_partial() { - static const char* msg = "Concurrent partial"; - GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true); - EventMark em("%s", msg); - - TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); - - ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_partial()); - - try_inject_alloc_failure(); - op_partial(); -} - void ShenandoahHeap::entry_traversal() { static const char* msg = "Concurrent traversal"; GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true); EventMark em("%s", msg); - TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); + TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters() + : monitoring_support()->concurrent_collection_counters()); ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal()); @@ -2903,6 +2885,18 @@ } } +bool ShenandoahHeap::is_minor_gc() const { + return _gc_cycle_mode.get() == MINOR; +} + +bool ShenandoahHeap::is_major_gc() const { + return _gc_cycle_mode.get() == MAJOR; +} + +void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) { + _gc_cycle_mode.set(gc_cycle_mode); +} + char ShenandoahHeap::gc_state() const { return _gc_state.raw_value(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -38,6 +38,7 @@ class ShenandoahAllocTracker; class ShenandoahCollectorPolicy; class ShenandoahConnectionMatrix; +class ShenandoahFastRegionSet; class ShenandoahPhaseTimings; class ShenandoahHeap; class ShenandoahHeapRegion; @@ -47,7 
+48,6 @@ class ShenandoahFreeSet; class ShenandoahConcurrentMark; class ShenandoahMarkCompact; -class ShenandoahPartialGC; class ShenandoahPacer; class ShenandoahTraversalGC; class ShenandoahVerifier; @@ -179,11 +179,8 @@ // Heap is under updating: needs SVRB/SVWB barriers. UPDATEREFS_BITPOS = 3, - // Heap is under partial collection - PARTIAL_BITPOS = 4, - // Heap is under traversal collection - TRAVERSAL_BITPOS = 5, + TRAVERSAL_BITPOS = 4, }; enum GCState { @@ -192,13 +189,11 @@ MARKING = 1 << MARKING_BITPOS, EVACUATION = 1 << EVACUATION_BITPOS, UPDATEREFS = 1 << UPDATEREFS_BITPOS, - PARTIAL = 1 << PARTIAL_BITPOS, TRAVERSAL = 1 << TRAVERSAL_BITPOS, }; enum ShenandoahDegenPoint { _degenerated_unset, - _degenerated_partial, _degenerated_traversal, _degenerated_outside_cycle, _degenerated_mark, @@ -207,12 +202,16 @@ _DEGENERATED_LIMIT, }; + enum GCCycleMode { + NONE, + MINOR, + MAJOR + }; + static const char* degen_point_to_string(ShenandoahDegenPoint point) { switch (point) { case _degenerated_unset: return ""; - case _degenerated_partial: - return "Partial"; case _degenerated_traversal: return "Traversal"; case _degenerated_outside_cycle: @@ -250,7 +249,6 @@ ShenandoahConcurrentMark* _scm; ShenandoahMarkCompact* _full_gc; - ShenandoahPartialGC* _partial_gc; ShenandoahTraversalGC* _traversal_gc; ShenandoahVerifier* _verifier; ShenandoahPacer* _pacer; @@ -314,6 +312,8 @@ ShenandoahEvacOOMHandler _oom_evac_handler; + ShenandoahSharedEnumFlag _gc_cycle_mode; + #ifdef ASSERT int _heap_expansion_count; #endif @@ -404,7 +404,7 @@ inline bool requires_marking(const void* entry) const; template - inline oop evac_update_with_forwarded(T* p, bool &evac); + inline oop evac_update_with_forwarded(T* p); template inline oop maybe_update_with_forwarded(T* p); @@ -439,7 +439,6 @@ void set_degenerated_gc_in_progress(bool in_progress); void set_full_gc_in_progress(bool in_progress); void set_full_gc_move_in_progress(bool in_progress); - void set_concurrent_partial_in_progress(bool 
in_progress); void set_concurrent_traversal_in_progress(bool in_progress); void set_has_forwarded_objects(bool cond); @@ -454,7 +453,6 @@ inline bool is_degenerated_gc_in_progress() const; inline bool is_full_gc_in_progress() const; inline bool is_full_gc_move_in_progress() const; - inline bool is_concurrent_partial_in_progress() const; inline bool is_concurrent_traversal_in_progress() const; inline bool has_forwarded_objects() const; inline bool is_gc_in_progress_mask(uint mask) const; @@ -464,6 +462,10 @@ bool process_references() const; bool unload_classes() const; + bool is_minor_gc() const; + bool is_major_gc() const; + void set_cycle_mode(GCCycleMode gc_cycle_mode); + inline bool region_in_collection_set(size_t region_index) const; // Mainly there to avoid accidentally calling the templated @@ -483,9 +485,8 @@ // Evacuates object src. Returns the evacuated object if this thread // succeeded, otherwise rolls back the evacuation and returns the - // evacuated object by the competing thread. 'succeeded' is an out - // param and set to true if this thread succeeded, otherwise to false. - inline oop evacuate_object(oop src, Thread* thread, bool& evacuated); + // evacuated object by the competing thread. 
+ inline oop evacuate_object(oop src, Thread* thread); inline bool cancelled_concgc() const; inline bool check_cancelled_concgc_and_yield(bool sts_active = true); inline bool try_cancel_concgc(); @@ -515,6 +516,7 @@ void handle_heap_shrinkage(double shrink_before); void reset_next_mark_bitmap(); + void reset_next_mark_bitmap_traversal(); MarkBitMap* complete_mark_bit_map(); MarkBitMap* next_mark_bit_map(); @@ -547,7 +549,6 @@ ShenandoahMonitoringSupport* monitoring_support(); ShenandoahConcurrentMark* concurrentMark() { return _scm; } ShenandoahMarkCompact* full_gc() { return _full_gc; } - ShenandoahPartialGC* partial_gc(); ShenandoahTraversalGC* traversal_gc(); ShenandoahVerifier* verifier(); ShenandoahPacer* pacer() const; @@ -687,8 +688,6 @@ void vmop_entry_final_evac(); void vmop_entry_init_updaterefs(); void vmop_entry_final_updaterefs(); - void vmop_entry_init_partial(); - void vmop_entry_final_partial(); void vmop_entry_init_traversal(); void vmop_entry_final_traversal(); void vmop_entry_full(GCCause::Cause cause); @@ -701,8 +700,6 @@ void entry_final_evac(); void entry_init_updaterefs(); void entry_final_updaterefs(); - void entry_init_partial(); - void entry_final_partial(); void entry_init_traversal(); void entry_final_traversal(); void entry_full(GCCause::Cause cause); @@ -714,9 +711,9 @@ void entry_preclean(); void entry_cleanup(); void entry_cleanup_bitmaps(); + void entry_cleanup_traversal(); void entry_evac(); void entry_updaterefs(); - void entry_partial(); void entry_traversal(); private: @@ -726,8 +723,6 @@ void op_final_evac(); void op_init_updaterefs(); void op_final_updaterefs(); - void op_init_partial(); - void op_final_partial(); void op_init_traversal(); void op_final_traversal(); void op_full(GCCause::Cause cause); @@ -741,7 +736,7 @@ void op_evac(); void op_updaterefs(); void op_cleanup_bitmaps(); - void op_partial(); + void op_cleanup_traversal(); void op_traversal(); private: diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -36,7 +36,7 @@ #include "gc/shenandoah/shenandoahConcurrentThread.hpp" #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" -#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" @@ -132,15 +132,14 @@ } template -inline oop ShenandoahHeap::evac_update_with_forwarded(T* p, bool &evac) { - evac = false; +inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) { T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { oop heap_oop = CompressedOops::decode_not_null(o); if (in_collection_set(heap_oop)) { oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); if (oopDesc::unsafe_equals(forwarded_oop, heap_oop)) { - forwarded_oop = evacuate_object(heap_oop, Thread::current(), evac); + forwarded_oop = evacuate_object(heap_oop, Thread::current()); } oop prev = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop); if (oopDesc::unsafe_equals(prev, heap_oop)) { @@ -273,9 +272,7 @@ } } -inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) { - evacuated = false; - +inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { if (ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) { // This thread went through the OOM during evac protocol and it is safe to return // the forward pointer. It must not attempt to evacuate any more. @@ -330,7 +327,6 @@ if (oopDesc::unsafe_equals(result, p)) { // Successfully evacuated. Our copy is now the public one! 
- evacuated = true; log_develop_trace(gc, compaction)("Copy object: " PTR_FORMAT " -> " PTR_FORMAT " succeeded", p2i(p), p2i(copy)); @@ -391,17 +387,13 @@ } inline bool ShenandoahHeap::is_idle() const { - return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | PARTIAL | TRAVERSAL); + return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS | TRAVERSAL); } inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const { return _gc_state.is_set(MARKING); } -inline bool ShenandoahHeap::is_concurrent_partial_in_progress() const { - return _gc_state.is_set(PARTIAL); -} - inline bool ShenandoahHeap::is_concurrent_traversal_in_progress() const { return _gc_state.is_set(TRAVERSAL); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -24,10 +24,12 @@ #include "precompiled.hpp" #include "memory/allocation.hpp" #include "gc/shenandoah/brooksPointer.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" #include "gc/shenandoah/shenandoahConnectionMatrix.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" +#include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "gc/shared/space.inline.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" @@ -59,7 +61,6 @@ _gclab_allocs(0), _shared_allocs(0), _reserved(MemRegion(start, size_words)), - _root(false), _new_top(NULL), _seqnum_first_alloc_mutator(0), _seqnum_last_alloc_mutator(0), @@ -435,7 +436,7 @@ st->print("|G %3d%%", (int) ((double) get_gclab_allocs() * 100 / capacity())); st->print("|S %3d%%", (int) ((double) get_shared_allocs() * 100 / capacity())); st->print("|L %3d%%", (int) ((double) get_live_data_bytes() * 100 / capacity())); - if (is_root()) { + if (_heap->traversal_gc() != NULL && 
_heap->traversal_gc()->root_regions()->is_in(region_number())) { st->print("|R"); } else { st->print("| "); @@ -505,16 +506,27 @@ ContiguousSpace::mangle_unused_area_complete(); } clear_live_data(); - _root = false; reset_alloc_metadata(); // Reset C-TAMS pointer to ensure size-based iteration, everything // in that regions is going to be new objects. - _heap->set_complete_top_at_mark_start(bottom(), bottom()); + if (ShenandoahRecycleClearsBitmap && !_heap->is_full_gc_in_progress()) { + HeapWord* r_bottom = bottom(); + HeapWord* top = _heap->complete_top_at_mark_start(r_bottom); + if (top > r_bottom) { + _heap->complete_mark_bit_map()->clear_range_large(MemRegion(r_bottom, top)); + } + + assert(_heap->is_next_bitmap_clear_range(bottom(), end()), "must be clear"); + _heap->set_next_top_at_mark_start(bottom(), bottom()); + } + // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region. assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear"); + _heap->set_complete_top_at_mark_start(bottom(), bottom()); + if (UseShenandoahMatrix) { _heap->connection_matrix()->clear_region(region_number()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -221,8 +221,6 @@ size_t _gclab_allocs; size_t _shared_allocs; - bool _root; - HeapWord* _new_top; size_t _critical_pins; @@ -376,13 +374,6 @@ void set_new_top(HeapWord* new_top) { _new_top = new_top; } HeapWord* new_top() const { return _new_top; } - void set_root(bool r) { - _root = r; - } - bool is_root() const { - return _root; - } - inline void adjust_alloc_metadata(ShenandoahHeap::AllocType type, size_t); void reset_alloc_metadata_to_shared(); void reset_alloc_metadata(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp 
b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp @@ -36,7 +36,6 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" -#include "gc/shenandoah/shenandoahPartialGC.hpp" #include "gc/shenandoah/shenandoahRootProcessor.hpp" #include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" @@ -129,12 +128,6 @@ } assert(!heap->is_update_refs_in_progress(), "sanity"); - // a3. Cancel concurrent partial GC, if in progress - if (heap->is_concurrent_partial_in_progress()) { - heap->partial_gc()->reset(); - heap->set_concurrent_partial_in_progress(false); - } - // a3. Cancel concurrent traversal GC, if in progress if (heap->is_concurrent_traversal_in_progress()) { heap->traversal_gc()->reset(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.hpp @@ -47,6 +47,8 @@ template void work(T *p); + + inline void set_base_object(oop obj) { /* Not needed */ } }; class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure { @@ -210,35 +212,18 @@ virtual void do_oop(oop* p) { do_oop_nv(p); } }; -class ShenandoahPartialEvacuateUpdateHeapClosure : public ExtendedOopClosure { -private: - ShenandoahPartialGC* _partial_gc; - Thread* _thread; - ShenandoahObjToScanQueue* _queue; -public: - ShenandoahPartialEvacuateUpdateHeapClosure(ShenandoahObjToScanQueue* q) : - _partial_gc(ShenandoahHeap::heap()->partial_gc()), - _thread(Thread::current()), _queue(q) {} - - template - void do_oop_nv(T* p); - - void do_oop(oop* p) { do_oop_nv(p); } - void do_oop(narrowOop* p) { do_oop_nv(p); } -}; - class 
ShenandoahTraversalSuperClosure : public MetadataAwareOopClosure { private: ShenandoahTraversalGC* _traversal_gc; Thread* _thread; ShenandoahObjToScanQueue* _queue; ShenandoahStrDedupQueue* _dedup_queue; - + oop _base_obj; protected: ShenandoahTraversalSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : MetadataAwareOopClosure(rp), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), - _thread(Thread::current()), _queue(q), _dedup_queue(NULL) { + _thread(Thread::current()), _queue(q), _dedup_queue(NULL), _base_obj(NULL) { } ShenandoahTraversalSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp, ShenandoahStrDedupQueue* dq) : @@ -247,8 +232,13 @@ _thread(Thread::current()), _queue(q), _dedup_queue(dq) { } - template + template void work(T* p); + +public: + inline void set_base_object(oop obj) { + _base_obj = obj; + } }; class ShenandoahTraversalClosure : public ShenandoahTraversalSuperClosure { @@ -257,7 +247,7 @@ ShenandoahTraversalSuperClosure(q, rp) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -272,7 +262,7 @@ ShenandoahTraversalSuperClosure(q, rp) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -287,7 +277,7 @@ ShenandoahTraversalSuperClosure(q, rp, dq) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -302,7 +292,7 @@ ShenandoahTraversalSuperClosure(q, rp, dq) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -317,7 +307,7 @@ 
ShenandoahTraversalSuperClosure(q, rp) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -332,7 +322,7 @@ ShenandoahTraversalSuperClosure(q, rp) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -347,7 +337,7 @@ ShenandoahTraversalSuperClosure(q, rp, dq) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } @@ -362,7 +352,127 @@ ShenandoahTraversalSuperClosure(q, rp, dq) {} template - inline void do_oop_nv(T* p) { work(p); } + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return true; } + virtual bool do_metadata() { return true; } +}; + +class ShenandoahTraversalMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : + ShenandoahTraversalSuperClosure(q, rp) {} + + template + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return false; } + virtual bool do_metadata() { return false; } +}; + +class ShenandoahTraversalMetadataMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalMetadataMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : + ShenandoahTraversalSuperClosure(q, rp) {} + + template + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { 
do_oop_nv(p); } + + inline bool do_metadata_nv() { return true; } + virtual bool do_metadata() { return true; } +}; + +class ShenandoahTraversalDedupMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalDedupMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp, ShenandoahStrDedupQueue* dq) : + ShenandoahTraversalSuperClosure(q, rp, dq) {} + + template + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return false; } + virtual bool do_metadata() { return false; } +}; + +class ShenandoahTraversalMetadataDedupMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalMetadataDedupMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp, ShenandoahStrDedupQueue* dq) : + ShenandoahTraversalSuperClosure(q, rp, dq) {} + + template + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return true; } + virtual bool do_metadata() { return true; } +}; + +class ShenandoahTraversalDegenMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalDegenMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : + ShenandoahTraversalSuperClosure(q, rp) {} + + template + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return false; } + virtual bool do_metadata() { return false; } +}; + +class ShenandoahTraversalMetadataDegenMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalMetadataDegenMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : + ShenandoahTraversalSuperClosure(q, rp) {} + + template + inline void do_oop_nv(T* p) { 
work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return true; } + virtual bool do_metadata() { return true; } +}; + +class ShenandoahTraversalDedupDegenMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalDedupDegenMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp, ShenandoahStrDedupQueue* dq) : + ShenandoahTraversalSuperClosure(q, rp, dq) {} + + template + inline void do_oop_nv(T* p) { work(p); } + + virtual void do_oop(narrowOop* p) { do_oop_nv(p); } + virtual void do_oop(oop* p) { do_oop_nv(p); } + + inline bool do_metadata_nv() { return false; } + virtual bool do_metadata() { return false; } +}; + +class ShenandoahTraversalMetadataDedupDegenMatrixClosure : public ShenandoahTraversalSuperClosure { +public: + ShenandoahTraversalMetadataDedupDegenMatrixClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp, ShenandoahStrDedupQueue* dq) : + ShenandoahTraversalSuperClosure(q, rp, dq) {} + + template + inline void do_oop_nv(T* p) { work(p); } virtual void do_oop(narrowOop* p) { do_oop_nv(p); } virtual void do_oop(oop* p) { do_oop_nv(p); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOopClosures.inline.hpp @@ -26,7 +26,6 @@ #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" -#include "gc/shenandoah/shenandoahPartialGC.inline.hpp" #include "gc/shenandoah/shenandoahTraversalGC.inline.hpp" template @@ -44,14 +43,9 @@ } } -template -inline void ShenandoahPartialEvacuateUpdateHeapClosure::do_oop_nv(T* p) { - _partial_gc->process_oop(p, _thread, _queue); -} - -template +template inline void ShenandoahTraversalSuperClosure::work(T* p) { - 
_traversal_gc->process_oop(p, _thread, _queue, _dedup_queue); + _traversal_gc->process_oop(p, _thread, _queue, _base_obj, _dedup_queue); } #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.cpp deleted file mode 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.cpp +++ /dev/null @@ -1,573 +0,0 @@ -/* - * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" - -#include "gc/shared/gcTraceTime.inline.hpp" -#include "gc/shared/workgroup.hpp" -#include "gc/shared/taskqueue.inline.hpp" -#include "gc/shenandoah/shenandoahBarrierSet.hpp" -#include "gc/shenandoah/shenandoahCollectionSet.hpp" -#include "gc/shenandoah/shenandoahCollectorPolicy.hpp" -#include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp" -#include "gc/shenandoah/shenandoahFreeSet.hpp" -#include "gc/shenandoah/shenandoahPhaseTimings.hpp" -#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" -#include "gc/shenandoah/shenandoahHeap.hpp" -#include "gc/shenandoah/shenandoahHeap.inline.hpp" -#include "gc/shenandoah/shenandoahOopClosures.inline.hpp" -#include "gc/shenandoah/shenandoahPartialGC.hpp" -#include "gc/shenandoah/shenandoahRootProcessor.hpp" -#include "gc/shenandoah/shenandoahTaskqueue.hpp" -#include "gc/shenandoah/shenandoahUtils.hpp" -#include "gc/shenandoah/shenandoahVerifier.hpp" -#include "gc/shenandoah/shenandoahWorkGroup.hpp" -#include "gc/shenandoah/shenandoahWorkerPolicy.hpp" - -#include "memory/iterator.hpp" -#include "runtime/safepoint.hpp" - -class ShenandoahPartialEvacuateUpdateRootsClosure : public OopClosure { - ShenandoahPartialGC* _partial_gc; - Thread* _thread; - ShenandoahObjToScanQueue* _queue; -private: - template - void do_oop_work(T* p) { _partial_gc->process_oop(p, _thread, _queue); } -public: - ShenandoahPartialEvacuateUpdateRootsClosure(ShenandoahObjToScanQueue* q) : - _partial_gc(ShenandoahHeap::heap()->partial_gc()), - _thread(Thread::current()), _queue(q) {} - void do_oop(oop* p) { - assert(! 
ShenandoahHeap::heap()->is_in_reserved(p), "sanity"); - do_oop_work(p); - } - void do_oop(narrowOop* p) { do_oop_work(p); } -}; - -class ShenandoahPartialSATBBufferClosure : public SATBBufferClosure { -private: - ShenandoahObjToScanQueue* _queue; - ShenandoahPartialGC* _partial_gc; - Thread* _thread; -public: - ShenandoahPartialSATBBufferClosure(ShenandoahObjToScanQueue* q) : - _queue(q), _partial_gc(ShenandoahHeap::heap()->partial_gc()), _thread(Thread::current()) { } - - void do_buffer(void** buffer, size_t size) { - for (size_t i = 0; i < size; ++i) { - oop* p = (oop*) &buffer[i]; - oop obj = RawAccess<>::oop_load(p); - _queue->push(obj); - } - } -}; - -class ShenandoahPartialSATBThreadsClosure : public ThreadClosure { - ShenandoahPartialSATBBufferClosure* _satb_cl; - int _thread_parity; - - public: - ShenandoahPartialSATBThreadsClosure(ShenandoahPartialSATBBufferClosure* satb_cl) : - _satb_cl(satb_cl), - _thread_parity(Threads::thread_claim_parity()) {} - - void do_thread(Thread* thread) { - if (thread->is_Java_thread()) { - if (thread->claim_oops_do(true, _thread_parity)) { - JavaThread* jt = (JavaThread*)thread; - ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl); - } - } else if (thread->is_VM_thread()) { - if (thread->claim_oops_do(true, _thread_parity)) { - ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl); - } - } - } -}; - -class ShenandoahInitPartialCollectionTask : public AbstractGangTask { -private: - ShenandoahRootProcessor* _rp; - ShenandoahHeap* _heap; -public: - ShenandoahInitPartialCollectionTask(ShenandoahRootProcessor* rp) : - AbstractGangTask("Shenandoah Init Partial Collection"), - _rp(rp), - _heap(ShenandoahHeap::heap()) {} - - void work(uint worker_id) { - ShenandoahObjToScanQueueSet* queues = _heap->partial_gc()->task_queues(); - ShenandoahObjToScanQueue* q = queues->queue(worker_id); - - ShenandoahEvacOOMScope oom_evac_scope; - - // Step 1: Process ordinary GC 
roots. - { - ShenandoahPartialEvacuateUpdateRootsClosure roots_cl(q); - CLDToOopClosure cld_cl(&roots_cl); - MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations); - _rp->process_all_roots(&roots_cl, &roots_cl, &cld_cl, &code_cl, NULL, worker_id); - } - } -}; - -class ShenandoahConcurrentPartialCollectionTask : public AbstractGangTask { -private: - ParallelTaskTerminator* _terminator; - ShenandoahHeapRegionSet* _root_regions; - ShenandoahHeap* _heap; -public: - ShenandoahConcurrentPartialCollectionTask(ParallelTaskTerminator* terminator, - ShenandoahHeapRegionSet* root_regions) : - AbstractGangTask("Shenandoah Concurrent Partial Collection"), - _terminator(terminator), _root_regions(root_regions), - _heap(ShenandoahHeap::heap()) {} - - void work(uint worker_id) { - ShenandoahEvacOOMScope oom_evac_scope; - ShenandoahPartialGC* partial_gc = _heap->partial_gc(); - ShenandoahObjToScanQueueSet* queues = partial_gc->task_queues(); - ShenandoahObjToScanQueue* q = queues->queue(worker_id); - - if (partial_gc->check_and_handle_cancelled_gc(_terminator)) return; - - ShenandoahPartialEvacuateUpdateHeapClosure cl(q); - - // Step 2: Process all root regions. - { - ShenandoahHeapRegionSetIterator iter = _root_regions->iterator(); - ShenandoahHeapRegion* r = iter.next(); - while (r != NULL) { - assert(r->is_root(), "must be root region"); - _heap->marked_object_oop_safe_iterate(r, &cl); - if (ShenandoahPacing) { - _heap->pacer()->report_partial(r->get_live_data_words()); - } - if (partial_gc->check_and_handle_cancelled_gc(_terminator)) return; - r = iter.next(); - } - } - if (partial_gc->check_and_handle_cancelled_gc(_terminator)) return; - - // Step 3: Drain all outstanding work in queues. 
- partial_gc->main_loop(worker_id, _terminator); - } -}; - -class ShenandoahFinalPartialCollectionTask : public AbstractGangTask { -private: - ParallelTaskTerminator* _terminator; - ShenandoahHeap* _heap; -public: - ShenandoahFinalPartialCollectionTask(ParallelTaskTerminator* terminator) : - AbstractGangTask("Shenandoah Final Partial Collection"), - _terminator(terminator), - _heap(ShenandoahHeap::heap()) {} - - void work(uint worker_id) { - ShenandoahEvacOOMScope oom_evac_scope; - - ShenandoahPartialGC* partial_gc = _heap->partial_gc(); - - ShenandoahObjToScanQueueSet* queues = partial_gc->task_queues(); - ShenandoahObjToScanQueue* q = queues->queue(worker_id); - - // Drain outstanding SATB queues. - { - ShenandoahPartialSATBBufferClosure satb_cl(q); - // Process remaining finished SATB buffers. - SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); - while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)); - // Process remaining threads SATB buffers. - ShenandoahPartialSATBThreadsClosure tc(&satb_cl); - Threads::threads_do(&tc); - } - - // Finally drain all outstanding work in queues. 
- partial_gc->main_loop(worker_id, _terminator); - - } -}; - -class ShenandoahPartialCollectionCleanupTask : public AbstractGangTask { -private: - ShenandoahHeap* _heap; -public: - ShenandoahPartialCollectionCleanupTask() : - AbstractGangTask("Shenandoah Partial Collection Cleanup"), - _heap(ShenandoahHeap::heap()) { - _heap->collection_set()->clear_current_index(); - } - - void work(uint worker_id) { - ShenandoahCollectionSet* cset = _heap->collection_set(); - ShenandoahHeapRegion* r = cset->claim_next(); - while (r != NULL) { - HeapWord* bottom = r->bottom(); - HeapWord* top = _heap->complete_top_at_mark_start(r->bottom()); - if (top > bottom) { - _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top)); - } - r = cset->claim_next(); - } - } - -}; - -ShenandoahPartialGC::ShenandoahPartialGC(ShenandoahHeap* heap, size_t num_regions) : - _heap(heap), - _matrix(heap->connection_matrix()), - _root_regions(new ShenandoahHeapRegionSet()), - _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())) { - - assert(_matrix != NULL, "need matrix"); - - uint num_queues = heap->max_workers(); - for (uint i = 0; i < num_queues; ++i) { - ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); - task_queue->initialize(); - _task_queues->register_queue(i, task_queue); - } - - from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahPartialInboundThreshold, mtGC); - set_has_work(false); -} - -ShenandoahPartialGC::~ShenandoahPartialGC() { - FREE_C_HEAP_ARRAY(size_t, from_idxs); -} - -bool ShenandoahPartialGC::prepare() { - _heap->collection_set()->clear(); - assert(_heap->collection_set()->count() == 0, "collection set not clear"); - - _heap->make_tlabs_parsable(true); - - ShenandoahConnectionMatrix* matrix = _heap->connection_matrix(); - - if (UseShenandoahMatrix && PrintShenandoahMatrix) { - LogTarget(Info, gc) lt; - LogStream ls(lt); - _heap->connection_matrix()->print_on(&ls); - } - - ShenandoahCollectionSet* collection_set = 
_heap->collection_set(); - size_t num_regions = _heap->num_regions(); - - // First pass: reset all roots - for (uint to_idx = 0; to_idx < num_regions; to_idx++) { - ShenandoahHeapRegion* r = _heap->get_region(to_idx); - r->set_root(false); - } - - // Second pass: find collection set, and mark root candidates - _heap->shenandoahPolicy()->choose_collection_set(collection_set, true); - - // Shortcut: no cset, bail - size_t num_cset = collection_set->count(); - - if (num_cset == 0) { - log_info(gc, ergo)("No regions with fewer inbound connections than threshold (" UINTX_FORMAT ")", - ShenandoahPartialInboundThreshold); - return false; - } - - // Final pass: rebuild free set and region set. - ShenandoahFreeSet* free_set = _heap->free_set(); - _root_regions->clear(); - free_set->clear(); - - assert(_root_regions->count() == 0, "must be cleared"); - - size_t work_size = 0; - - for (uint from_idx = 0; from_idx < num_regions; from_idx++) { - ShenandoahHeapRegion* r = _heap->get_region(from_idx); - - // Never assume anything implicitely marked. - _heap->set_next_top_at_mark_start(r->bottom(), r->end()); - - if (r->is_root() && !r->in_collection_set()) { - _root_regions->add_region(r); - work_size += r->get_live_data_words(); - - matrix->clear_region_outbound(from_idx); - - // Since root region can be allocated at, we should bound the scans - // in it at current top. Otherwise, one thread may evacuate objects - // to that root region, while another would try to scan newly evac'ed - // objects under the race. 
- r->set_concurrent_iteration_safe_limit(r->top()); - } - } - - free_set->rebuild(); - - if (ShenandoahPacing) { - work_size += collection_set->live_data() >> LogHeapWordSize; - _heap->pacer()->setup_for_partial(work_size); - } - - log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions, "SIZE_FORMAT" root regions", - collection_set->count(), _root_regions->count()); - - return true; -} - -void ShenandoahPartialGC::init_partial_collection() { - assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW partial GC"); - - assert(_heap->is_next_bitmap_clear(), "need clear marking bitmap"); - _heap->set_alloc_seq_gc_start(); - - if (ShenandoahVerify) { - _heap->verifier()->verify_before_partial(); - } - - { - ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::partial_gc_prepare); - ShenandoahHeapLocker lock(_heap->lock()); - bool has_work = prepare(); - set_has_work(has_work); - } - - if (!has_work()) { - reset(); - return; - } - - _heap->set_concurrent_partial_in_progress(true); - - { - ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_partial_gc_work); - assert(_task_queues->is_empty(), "queues must be empty before partial GC"); - -#if defined(COMPILER2) || INCLUDE_JVMCI - DerivedPointerTable::clear(); -#endif - - { - uint nworkers = _heap->workers()->active_workers(); - ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_partial_gc_work); - - if (UseShenandoahOWST) { - ShenandoahTaskTerminator terminator(nworkers, task_queues()); - ShenandoahInitPartialCollectionTask partial_task(&rp); - _heap->workers()->run_task(&partial_task); - } else { - ParallelTaskTerminator terminator(nworkers, task_queues()); - ShenandoahInitPartialCollectionTask partial_task(&rp); - _heap->workers()->run_task(&partial_task); - } - } - -#if defined(COMPILER2) || INCLUDE_JVMCI - DerivedPointerTable::update_pointers(); -#endif - if (_heap->cancelled_concgc()) { - _heap->fixup_roots(); - reset(); - _heap->set_concurrent_partial_in_progress(false); - } 
- } -} - -template -void ShenandoahPartialGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator) { - ShenandoahObjToScanQueueSet* queues = task_queues(); - ShenandoahObjToScanQueue* q = queues->queue(worker_id); - - uintx stride = ShenandoahMarkLoopStride; - ShenandoahPartialEvacuateUpdateHeapClosure cl(q); - ShenandoahMarkTask task; - - // Process outstanding queues, if any. - q = queues->claim_next(); - while (q != NULL) { - if (_heap->check_cancelled_concgc_and_yield()) { - ShenandoahCancelledTerminatorTerminator tt; - ShenandoahEvacOOMScopeLeaver oom_scope_leaver; - while (!terminator->offer_termination(&tt)); - return; - } - - for (uint i = 0; i < stride; i++) { - if (q->pop_buffer(task) || - q->pop_local(task) || - q->pop_overflow(task)) { - oop obj = task.obj(); - assert(!CompressedOops::is_null(obj), "must not be null"); - obj->oop_iterate(&cl); - } else { - assert(q->is_empty(), "Must be empty"); - q = queues->claim_next(); - break; - } - } - } - - // Normal loop. - q = queues->queue(worker_id); - ShenandoahPartialSATBBufferClosure satb_cl(q); - SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); - - int seed = 17; - - while (true) { - if (check_and_handle_cancelled_gc(terminator)) return; - - for (uint i = 0; i < stride; i++) { - if ((q->pop_buffer(task) || - q->pop_local(task) || - q->pop_overflow(task) || - (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) || - queues->steal(worker_id, &seed, task))) { - oop obj = task.obj(); - assert(!CompressedOops::is_null(obj), "must not be null"); - obj->oop_iterate(&cl); - } else { - ShenandoahEvacOOMScopeLeaver oom_scope_leaver; - if (terminator->offer_termination()) return; - } - } - } -} - -bool ShenandoahPartialGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) { - if (_heap->cancelled_concgc()) { - ShenandoahCancelledTerminatorTerminator tt; - ShenandoahEvacOOMScopeLeaver oom_scope_leaver; - while (! 
terminator->offer_termination(&tt)); - return true; - } - return false; -} - -void ShenandoahPartialGC::concurrent_partial_collection() { - assert(has_work(), "Performance: should only be here when there is work"); - - ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_partial); - if (!_heap->cancelled_concgc()) { - uint nworkers = _heap->workers()->active_workers(); - task_queues()->reserve(nworkers); - if (UseShenandoahOWST) { - ShenandoahTaskTerminator terminator(nworkers, task_queues()); - ShenandoahConcurrentPartialCollectionTask partial_task(&terminator, _root_regions); - _heap->workers()->run_task(&partial_task); - } else { - ParallelTaskTerminator terminator(nworkers, task_queues()); - ShenandoahConcurrentPartialCollectionTask partial_task(&terminator, _root_regions); - _heap->workers()->run_task(&partial_task); - } - } - - if (_heap->cancelled_concgc()) { - _task_queues->clear(); - } - assert(_task_queues->is_empty(), "queues must be empty after partial GC"); -} - -void ShenandoahPartialGC::final_partial_collection() { - assert(has_work(), "Performance: should only be here when there is work"); - - if (!_heap->cancelled_concgc()) { - ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_partial_gc_work); - uint nworkers = _heap->workers()->active_workers(); - task_queues()->reserve(nworkers); - - StrongRootsScope scope(nworkers); - if (UseShenandoahOWST) { - ShenandoahTaskTerminator terminator(nworkers, task_queues()); - ShenandoahFinalPartialCollectionTask partial_task(&terminator); - _heap->workers()->run_task(&partial_task); - } else { - ParallelTaskTerminator terminator(nworkers, task_queues()); - ShenandoahFinalPartialCollectionTask partial_task(&terminator); - _heap->workers()->run_task(&partial_task); - } - } - - if (!_heap->cancelled_concgc()) { - // Still good? Update the roots then - _heap->concurrentMark()->update_roots(ShenandoahPhaseTimings::final_partial_gc_work); - } - - if (!_heap->cancelled_concgc()) { - // Still good? 
We can now trash the cset, and make final verification - { - ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::partial_gc_cleanup); - ShenandoahCollectionSet* cset = _heap->collection_set(); - ShenandoahHeapLocker lock(_heap->lock()); - - ShenandoahPartialCollectionCleanupTask cleanup; - _heap->workers()->run_task(&cleanup); - - // Trash everything when bitmaps are cleared. - cset->clear_current_index(); - ShenandoahHeapRegion* r; - while((r = cset->next()) != NULL) { - r->make_trash(); - } - cset->clear(); - - reset(); - } - - if (ShenandoahVerify) { - _heap->verifier()->verify_after_partial(); - } - } else { - // On cancellation path, fixup roots to make them consistent - _heap->fixup_roots(); - reset(); - } - - assert(_task_queues->is_empty(), "queues must be empty after partial GC"); - _heap->set_concurrent_partial_in_progress(false); -} - -void ShenandoahPartialGC::reset() { - _task_queues->clear(); - - ShenandoahHeapRegionSetIterator root_iter = _root_regions->iterator(); - ShenandoahHeapRegion* r; - while((r = root_iter.next()) != NULL) { - r->set_root(false); - } - _root_regions->clear(); - - set_has_work(false); -} - -void ShenandoahPartialGC::set_has_work(bool value) { - _has_work.set_cond(value); -} - -bool ShenandoahPartialGC::has_work() { - return _has_work.is_set(); -} - -ShenandoahObjToScanQueueSet* ShenandoahPartialGC::task_queues() { - return _task_queues; -} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.hpp deleted file mode 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_HPP -#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_HPP - -#include "memory/allocation.hpp" -#include "gc/shenandoah/shenandoahTaskqueue.hpp" - -class Thread; -class ShenandoahHeapRegionSet; -class ShenandoahHeap; - -class ShenandoahPartialGC : public CHeapObj { -private: - ShenandoahHeapRegionSet* _root_regions; - ShenandoahHeap* _heap; - ShenandoahConnectionMatrix* _matrix; - ShenandoahObjToScanQueueSet* _task_queues; - size_t* from_idxs; - ShenandoahSharedFlag _has_work; - - void set_has_work(bool value); - -public: - ShenandoahPartialGC(ShenandoahHeap* heap, size_t num_regions); - ~ShenandoahPartialGC(); - - bool has_work(); - - void reset(); - bool prepare(); - void init_partial_collection(); - void concurrent_partial_collection(); - void final_partial_collection(); - - template - void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue); - - template - void main_loop(uint worker_id, ParallelTaskTerminator* terminator); - - bool check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator); - - ShenandoahObjToScanQueueSet* task_queues(); -}; - -#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_HPP diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.inline.hpp deleted file mode 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPartialGC.inline.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_INLINE_HPP -#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_INLINE_HPP - -#include "gc/shenandoah/shenandoahAsserts.hpp" -#include "gc/shenandoah/shenandoahPartialGC.hpp" - -template -void ShenandoahPartialGC::process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue) { - T o = RawAccess<>::oop_load(p); - if (!CompressedOops::is_null(o)) { - oop obj = CompressedOops::decode_not_null(o); - if (_heap->in_collection_set(obj)) { - oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (oopDesc::unsafe_equals(obj, forw)) { - bool evacuated = false; - forw = _heap->evacuate_object(obj, thread, evacuated); - - // Only the thread that succeeded evacuating this object pushes it to its work queue. - if (evacuated) { - assert(oopDesc::is_oop(forw), "sanity"); - bool succeeded = queue->push(ShenandoahMarkTask(forw)); - assert(succeeded, "must succeed to push to task queue"); - } - } - assert(! oopDesc::unsafe_equals(obj, forw) || _heap->cancelled_concgc(), "must be evacuated"); - // Update reference. - _heap->atomic_compare_exchange_oop(forw, p, obj); - // TODO: No need to update matrix if other thread beat us. - obj = forw; // For matrix update below. 
- } - if (UPDATE_MATRIX) { - shenandoah_assert_not_forwarded_except(p, obj, _heap->cancelled_concgc()); - _matrix->set_connected(p, obj); - } - } -} - -#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_INLINE_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp @@ -72,8 +72,6 @@ guarantee(phase == init_evac || phase == scan_roots || phase == update_roots || - phase == init_partial_gc_work || - phase == final_partial_gc_work || phase == init_traversal_gc_work || phase == final_traversal_gc_work || phase == final_traversal_update_roots || @@ -247,44 +245,6 @@ _phase_names[full_gc_update_str_dedup_table] = " Update String Dedup Table"; _phase_names[full_gc_resize_tlabs] = " Resize TLABs"; - _phase_names[init_partial_gc_gross] = "Pause Init Partial (G)"; - _phase_names[init_partial_gc] = "Pause Init Partial (N)"; - _phase_names[partial_gc_prepare] = " Prepare"; - _phase_names[init_partial_gc_work] = " Work"; - _phase_names[init_partial_gc_thread_roots] = " PI: Thread Roots"; - _phase_names[init_partial_gc_code_roots] = " PI: Code Cache Roots"; - _phase_names[init_partial_gc_string_table_roots] = " PI: String Table Roots"; - _phase_names[init_partial_gc_universe_roots] = " PI: Universe Roots"; - _phase_names[init_partial_gc_jni_roots] = " PI: JNI Roots"; - _phase_names[init_partial_gc_jni_weak_roots] = " PI: JNI Weak Roots"; - _phase_names[init_partial_gc_synchronizer_roots] = " PI: Synchronizer Roots"; - _phase_names[init_partial_gc_flat_profiler_roots] = " PI: Flat Profiler Roots"; - _phase_names[init_partial_gc_management_roots] = " PI: Management Roots"; - _phase_names[init_partial_gc_system_dict_roots] = " PI: System Dict Roots"; - _phase_names[init_partial_gc_cldg_roots] = " PI: CLDG Roots"; - _phase_names[init_partial_gc_jvmti_roots] = " PI: JVMTI 
Roots"; - _phase_names[init_partial_gc_string_dedup_roots] = " PI: String Dedup Roots"; - _phase_names[init_partial_gc_finish_queues] = " PI: Finish Queues"; - _phase_names[final_partial_gc_gross] = "Pause Final Partial (G)"; - _phase_names[final_partial_gc] = "Pause Final Partial (N)"; - _phase_names[final_partial_gc_work] = " Work"; - _phase_names[final_partial_gc_thread_roots] = " PF: Thread Roots"; - _phase_names[final_partial_gc_code_roots] = " PF: Code Cache Roots"; - _phase_names[final_partial_gc_string_table_roots] = " PF: String Table Roots"; - _phase_names[final_partial_gc_universe_roots] = " PF: Universe Roots"; - _phase_names[final_partial_gc_jni_roots] = " PF: JNI Roots"; - _phase_names[final_partial_gc_jni_weak_roots] = " PF: JNI Weak Roots"; - _phase_names[final_partial_gc_synchronizer_roots] = " PF: Synchronizer Roots"; - _phase_names[final_partial_gc_flat_profiler_roots] = " PF: Flat Profiler Roots"; - _phase_names[final_partial_gc_management_roots] = " PF: Management Roots"; - _phase_names[final_partial_gc_system_dict_roots] = " PF: System Dict Roots"; - _phase_names[final_partial_gc_cldg_roots] = " PF: CLDG Roots"; - _phase_names[final_partial_gc_jvmti_roots] = " PF: JVMTI Roots"; - _phase_names[final_partial_gc_string_dedup_roots] = " PF: String Dedup Roots"; - _phase_names[final_partial_gc_finish_queues] = " PF: Finish Queues"; - - _phase_names[partial_gc_cleanup] = " Cleanup"; - _phase_names[init_traversal_gc_gross] = "Pause Init Traversal (G)"; _phase_names[init_traversal_gc] = "Pause Init Traversal (N)"; _phase_names[traversal_gc_prepare] = " Prepare"; @@ -347,7 +307,6 @@ _phase_names[conc_cleanup_recycle] = " Recycle"; _phase_names[conc_cleanup_reset_bitmaps] = " Reset Bitmaps"; _phase_names[conc_other] = "Concurrent Other"; - _phase_names[conc_partial] = "Concurrent Partial"; _phase_names[conc_traversal] = "Concurrent Traversal"; _phase_names[init_update_refs_gross] = "Pause Init Update Refs (G)"; diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp @@ -147,49 +147,6 @@ final_update_refs_recycle, - init_partial_gc_gross, - init_partial_gc, - partial_gc_prepare, - - // Per-thread timer block, should have "roots" counters in consistent order - init_partial_gc_work, - init_partial_gc_thread_roots, - init_partial_gc_code_roots, - init_partial_gc_string_table_roots, - init_partial_gc_universe_roots, - init_partial_gc_jni_roots, - init_partial_gc_jni_weak_roots, - init_partial_gc_synchronizer_roots, - init_partial_gc_flat_profiler_roots, - init_partial_gc_management_roots, - init_partial_gc_system_dict_roots, - init_partial_gc_cldg_roots, - init_partial_gc_jvmti_roots, - init_partial_gc_string_dedup_roots, - init_partial_gc_finish_queues, - - final_partial_gc_gross, - final_partial_gc, - - // Per-thread timer block, should have "roots" counters in consistent order - final_partial_gc_work, - final_partial_gc_thread_roots, - final_partial_gc_code_roots, - final_partial_gc_string_table_roots, - final_partial_gc_universe_roots, - final_partial_gc_jni_roots, - final_partial_gc_jni_weak_roots, - final_partial_gc_synchronizer_roots, - final_partial_gc_flat_profiler_roots, - final_partial_gc_management_roots, - final_partial_gc_system_dict_roots, - final_partial_gc_cldg_roots, - final_partial_gc_jvmti_roots, - final_partial_gc_string_dedup_roots, - final_partial_gc_finish_queues, - - partial_gc_cleanup, - degen_gc_gross, degen_gc, @@ -308,7 +265,6 @@ conc_cleanup, conc_cleanup_recycle, conc_cleanup_reset_bitmaps, - conc_partial, conc_traversal, // Unclassified @@ -320,7 +276,7 @@ // These are the subphases of GC phases (scan_roots, update_roots, - // init_evac, final_update_refs_roots, partial_gc_work and full_gc_roots). 
+ // init_evac, final_update_refs_roots and full_gc_roots). // Make sure they are following this order. enum GCParPhases { ThreadRoots, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp @@ -29,6 +29,7 @@ #include "gc/shared/taskqueue.inline.hpp" #include "gc/shared/weakProcessor.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahConnectionMatrix.hpp" #include "gc/shenandoah/shenandoahCollectionSet.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" @@ -36,6 +37,7 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahHeap.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" #include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "gc/shenandoah/shenandoahRootProcessor.hpp" @@ -96,10 +98,13 @@ ShenandoahObjToScanQueue* _queue; ShenandoahTraversalGC* _traversal_gc; ShenandoahHeap* _heap; + ShenandoahHeapRegionSet* _traversal_set; + public: ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) : _queue(q), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), - _heap(ShenandoahHeap::heap()) + _heap(ShenandoahHeap::heap()), + _traversal_set(ShenandoahHeap::heap()->traversal_gc()->traversal_set()) { } void do_buffer(void** buffer, size_t size) { @@ -107,7 +112,7 @@ oop* p = (oop*) &buffer[i]; oop obj = RawAccess<>::oop_load(p); shenandoah_assert_not_forwarded(p, obj); - if (!_heap->is_marked_next(obj) && _heap->mark_next(obj)) { + if (_traversal_set->is_in((HeapWord*) obj) && !_heap->is_marked_next(obj) && _heap->mark_next(obj)) { _queue->push(ShenandoahMarkTask(obj)); } } @@ -299,7 +304,11 @@ 
ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) : _heap(heap), - _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())) { + _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())), + _traversal_set(new ShenandoahHeapRegionSet()), + _root_regions(new ShenandoahHeapRegionSet()), + _root_regions_iterator(_root_regions->iterator()), + _matrix(heap->connection_matrix()) { uint num_queues = heap->max_workers(); for (uint i = 0; i < num_queues; ++i) { @@ -319,6 +328,35 @@ ShenandoahTraversalGC::~ShenandoahTraversalGC() { } +void ShenandoahTraversalGC::prepare_regions() { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t num_regions = heap->num_regions(); + ShenandoahConnectionMatrix* matrix = _heap->connection_matrix(); + + for (size_t i = 0; i < num_regions; i++) { + ShenandoahHeapRegion* region = heap->get_region(i); + if (heap->is_bitmap_slice_committed(region)) { + if (_traversal_set->is_in(i)) { + heap->set_next_top_at_mark_start(region->bottom(), region->top()); + region->clear_live_data(); + assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared"); + } else { + // Everything outside the traversal set is always considered live. + heap->set_next_top_at_mark_start(region->bottom(), region->bottom()); + } + if (_root_regions->is_in(i)) { + assert(!region->in_collection_set(), "roots must not overlap with cset"); + matrix->clear_region_outbound(i); + // Since root region can be allocated at, we should bound the scans + // in it at current top. Otherwise, one thread may evacuate objects + // to that root region, while another would try to scan newly evac'ed + // objects under the race. 
+ region->set_concurrent_iteration_safe_limit(region->top()); + } + } + } +} + void ShenandoahTraversalGC::prepare() { _heap->collection_set()->clear(); assert(_heap->collection_set()->count() == 0, "collection set not clear"); @@ -331,12 +369,13 @@ ShenandoahCollectionSet* collection_set = _heap->collection_set(); // Find collection set - _heap->shenandoahPolicy()->choose_collection_set(collection_set, false); + _heap->shenandoahPolicy()->choose_collection_set(collection_set); + prepare_regions(); // Rebuild free set free_set->rebuild(); - log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions", collection_set->count()); + log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions and "SIZE_FORMAT" root set regions", collection_set->count(), _root_regions->count()); } void ShenandoahTraversalGC::init_traversal_collection() { @@ -393,6 +432,8 @@ if (ShenandoahPacing) { _heap->pacer()->setup_for_traversal(); } + + _root_regions_iterator = _root_regions->iterator(); } void ShenandoahTraversalGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator, bool do_satb) { @@ -415,44 +456,88 @@ if (_heap->process_references()) { rp = _heap->ref_processor(); } - if (!_heap->is_degenerated_gc_in_progress()) { - if (_heap->unload_classes()) { - if (ShenandoahStringDedup::is_enabled()) { - ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); - ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq); - main_loop_work(&cl, ld, w, t); + if (UseShenandoahMatrix) { + if (!_heap->is_degenerated_gc_in_progress()) { + if (_heap->unload_classes()) { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalMetadataMatrixClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } else { - ShenandoahTraversalMetadataClosure cl(q, rp); - main_loop_work(&cl, ld, w, t); + if 
(ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalDedupMatrixClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalMatrixClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } } else { - if (ShenandoahStringDedup::is_enabled()) { - ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); - ShenandoahTraversalDedupClosure cl(q, rp, dq); - main_loop_work(&cl, ld, w, t); + if (_heap->unload_classes()) { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } else { - ShenandoahTraversalClosure cl(q, rp); - main_loop_work(&cl, ld, w, t); + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalDegenMatrixClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } } } else { - if (_heap->unload_classes()) { - if (ShenandoahStringDedup::is_enabled()) { - ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); - ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq); - main_loop_work(&cl, ld, w, t); + if (!_heap->is_degenerated_gc_in_progress()) { + if (_heap->unload_classes()) { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalMetadataClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } else { - ShenandoahTraversalMetadataDegenClosure cl(q, rp); - main_loop_work(&cl, ld, w, t); + if (ShenandoahStringDedup::is_enabled()) { + 
ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalDedupClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } } else { - if (ShenandoahStringDedup::is_enabled()) { - ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); - ShenandoahTraversalDedupDegenClosure cl(q, rp, dq); - main_loop_work(&cl, ld, w, t); + if (_heap->unload_classes()) { + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalMetadataDegenClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } else { - ShenandoahTraversalDegenClosure cl(q, rp); - main_loop_work(&cl, ld, w, t); + if (ShenandoahStringDedup::is_enabled()) { + ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); + ShenandoahTraversalDedupDegenClosure cl(q, rp, dq); + main_loop_work(&cl, ld, w, t); + } else { + ShenandoahTraversalDegenClosure cl(q, rp); + main_loop_work(&cl, ld, w, t); + } } } } @@ -492,6 +577,23 @@ } } } + + if (check_and_handle_cancelled_gc(terminator)) return; + + // Step 2: Process all root regions. + // TODO: Interleave this in the normal mark loop below. + ShenandoahHeapRegion* r = _root_regions_iterator.claim_next(); + while (r != NULL) { + _heap->marked_object_oop_safe_iterate(r, cl); + if (ShenandoahPacing) { + _heap->pacer()->report_partial(r->get_live_data_words()); + } + if (check_and_handle_cancelled_gc(terminator)) return; + r = _root_regions_iterator.claim_next(); + } + + if (check_and_handle_cancelled_gc(terminator)) return; + // Normal loop. 
q = queues->queue(worker_id); ShenandoahTraversalSATBBufferClosure satb_cl(q); @@ -503,11 +605,11 @@ if (check_and_handle_cancelled_gc(terminator)) return; for (uint i = 0; i < stride; i++) { - if ((q->pop_buffer(task) || - q->pop_local(task) || - q->pop_overflow(task) || - (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) || - queues->steal(worker_id, &seed, task))) { + if (q->pop_buffer(task) || + q->pop_local(task) || + q->pop_overflow(task) || + (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) || + queues->steal(worker_id, &seed, task)) { conc_mark->do_task(q, cl, live_data, &task); } else { ShenandoahEvacOOMScopeLeaver oom_scope_leaver; @@ -598,12 +700,15 @@ // Clear immediate garbage regions. size_t num_regions = _heap->num_regions(); + ShenandoahHeapRegionSet* traversal_regions = traversal_set(); ShenandoahFreeSet* free_regions = _heap->free_set(); free_regions->clear(); for (size_t i = 0; i < num_regions; i++) { ShenandoahHeapRegion* r = _heap->get_region(i); bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top(); - if (r->is_humongous_start() && !r->has_live() && not_allocated) { + + bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated; + if (r->is_humongous_start() && candidate) { // Trash humongous. HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size(); assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked"); @@ -614,7 +719,7 @@ assert(r->is_humongous_continuation(), "must be humongous continuation"); r->make_trash(); } - } else if (!r->is_empty() && !r->has_live() && not_allocated) { + } else if (!r->is_empty() && candidate) { // Trash regular. 
assert(!r->is_humongous(), "handled above"); assert(!r->is_trash(), "must not already be trashed"); @@ -726,7 +831,7 @@ ShenandoahTraversalGC* _traversal_gc; template inline void do_oop_nv(T* p) { - _traversal_gc->process_oop(p, _thread, _queue); + _traversal_gc->process_oop(p, _thread, _queue, NULL); } public: @@ -745,7 +850,7 @@ ShenandoahTraversalGC* _traversal_gc; template inline void do_oop_nv(T* p) { - _traversal_gc->process_oop(p, _thread, _queue); + _traversal_gc->process_oop(p, _thread, _queue, NULL); } public: @@ -757,6 +862,46 @@ void do_oop(oop* p) { do_oop_nv(p); } }; +class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + Thread* _thread; + ShenandoahTraversalGC* _traversal_gc; + template + inline void do_oop_nv(T* p) { + // TODO: Need to somehow pass base_obj here? + _traversal_gc->process_oop(p, _thread, _queue, NULL); + } + +public: + ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) : + _queue(q), _thread(Thread::current()), + _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {} + + void do_oop(narrowOop* p) { do_oop_nv(p); } + void do_oop(oop* p) { do_oop_nv(p); } +}; + +class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure { +private: + ShenandoahObjToScanQueue* _queue; + Thread* _thread; + ShenandoahTraversalGC* _traversal_gc; + template + inline void do_oop_nv(T* p) { + // TODO: Need to somehow pass base_obj here? + _traversal_gc->process_oop(p, _thread, _queue, NULL); + } + +public: + ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) : + _queue(q), _thread(Thread::current()), + _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {} + + void do_oop(narrowOop* p) { do_oop_nv(p); } + void do_oop(oop* p) { do_oop_nv(p); } +}; + void ShenandoahTraversalGC::preclean_weak_refs() { // Pre-cleaning weak references before diving into STW makes sense at the // end of concurrent mark. 
This will filter out the references which referents @@ -788,11 +933,19 @@ ShenandoahTraversalPrecleanCompleteGCClosure complete_gc; ShenandoahForwardedIsAliveClosure is_alive; - ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0)); - ResourceMark rm; - rp->preclean_discovered_references(&is_alive, &keep_alive, - &complete_gc, &yield, - NULL); + if (UseShenandoahMatrix) { + ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0)); + ResourceMark rm; + rp->preclean_discovered_references(&is_alive, &keep_alive, + &complete_gc, &yield, + NULL); + } else { + ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0)); + ResourceMark rm; + rp->preclean_discovered_references(&is_alive, &keep_alive, + &complete_gc, &yield, + NULL); + } assert(!sh->cancelled_concgc() || task_queues()->is_empty(), "Should be empty"); } @@ -867,12 +1020,22 @@ ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator); ShenandoahForwardedIsAliveClosure is_alive; - if (!heap->is_degenerated_gc_in_progress()) { - ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); - _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + if (UseShenandoahMatrix) { + if (!heap->is_degenerated_gc_in_progress()) { + ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } else { + ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } } else { - ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); - _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + if (!heap->is_degenerated_gc_in_progress()) { + 
ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } else { + ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); + _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); + } } } }; @@ -975,13 +1138,39 @@ ShenandoahGCPhase phase(phase_process); ShenandoahForwardedIsAliveClosure is_alive; - ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id)); - rp->process_discovered_references(&is_alive, &keep_alive, - &complete_gc, &executor, - &pt); - pt.print_all_references(); - - WeakProcessor::weak_oops_do(&is_alive, &keep_alive); + if (UseShenandoahMatrix) { + if (!_heap->is_degenerated_gc_in_progress()) { + ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id)); + rp->process_discovered_references(&is_alive, &keep_alive, + &complete_gc, &executor, + &pt); + pt.print_all_references(); + WeakProcessor::weak_oops_do(&is_alive, &keep_alive); + } else { + ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id)); + rp->process_discovered_references(&is_alive, &keep_alive, + &complete_gc, &executor, + &pt); + pt.print_all_references(); + WeakProcessor::weak_oops_do(&is_alive, &keep_alive); + } + } else { + if (!_heap->is_degenerated_gc_in_progress()) { + ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id)); + rp->process_discovered_references(&is_alive, &keep_alive, + &complete_gc, &executor, + &pt); + pt.print_all_references(); + WeakProcessor::weak_oops_do(&is_alive, &keep_alive); + } else { + ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id)); + rp->process_discovered_references(&is_alive, &keep_alive, + &complete_gc, &executor, + &pt); + pt.print_all_references(); + 
WeakProcessor::weak_oops_do(&is_alive, &keep_alive); + } + } assert(!_heap->cancelled_concgc() || task_queues()->is_empty(), "Should be empty"); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp @@ -25,6 +25,7 @@ #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP #include "memory/allocation.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahTaskqueue.hpp" class Thread; @@ -47,18 +48,28 @@ // too many atomic updates. size_t/jint is too large, jbyte is too small. jushort** _liveness_local; + ShenandoahHeapRegionSet* const _traversal_set; + ShenandoahHeapRegionSet* const _root_regions; + + ShenandoahHeapRegionSetIterator _root_regions_iterator; + + ShenandoahConnectionMatrix* const _matrix; + public: ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions); ~ShenandoahTraversalGC(); + ShenandoahHeapRegionSet* const traversal_set() const { return _traversal_set; } + ShenandoahHeapRegionSet* const root_regions() const { return _root_regions;} + void reset(); void prepare(); void init_traversal_collection(); void concurrent_traversal_collection(); void final_traversal_collection(); - template - inline void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahStrDedupQueue* dq = NULL); + template + inline void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, oop base_obj, ShenandoahStrDedupQueue* dq = NULL); bool check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator); @@ -71,6 +82,8 @@ private: + void prepare_regions(); + template void main_loop_prework(uint w, ParallelTaskTerminator* t); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp --- 
a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp @@ -28,17 +28,19 @@ #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" #include "gc/shenandoah/shenandoahStringDedup.hpp" #include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "gc/shenandoah/shenandoahTaskqueue.hpp" #include "memory/iterator.inline.hpp" #include "oops/oop.inline.hpp" -template -void ShenandoahTraversalGC::process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahStrDedupQueue* dq) { +template +void ShenandoahTraversalGC::process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, oop base_obj, ShenandoahStrDedupQueue* dq) { T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); + bool update_matrix = true; if (DEGEN) { oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); if (!oopDesc::unsafe_equals(obj, forw)) { @@ -49,16 +51,32 @@ } else if (_heap->in_collection_set(obj)) { oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); if (oopDesc::unsafe_equals(obj, forw)) { - bool evacuated = false; - forw = _heap->evacuate_object(obj, thread, evacuated); + forw = _heap->evacuate_object(obj, thread); } - assert(! oopDesc::unsafe_equals(obj, forw) || _heap->cancelled_concgc() || _heap->is_degenerated_gc_in_progress(), "must be evacuated"); + // tty->print_cr("NORMAL visit: "PTR_FORMAT", obj: "PTR_FORMAT" to "PTR_FORMAT, p2i(p), p2i(obj), p2i(forw)); + assert(! oopDesc::unsafe_equals(obj, forw) || _heap->cancelled_concgc(), "must be evacuated"); // Update reference. 
- _heap->atomic_compare_exchange_oop(forw, p, obj); + oop previous = _heap->atomic_compare_exchange_oop(forw, p, obj); + if (UPDATE_MATRIX && !oopDesc::unsafe_equals(previous, obj)) { + update_matrix = false; + } obj = forw; } - if (!_heap->is_marked_next(obj) && _heap->mark_next(obj)) { + if (UPDATE_MATRIX && update_matrix) { + shenandoah_assert_not_forwarded_except(p, obj, _heap->cancelled_concgc()); + const void* src; + if (!_heap->is_in_reserved(p)) { + src = (const void*)(HeapWord*) obj; + } else { + src = p; + } + if (src != NULL) { + _matrix->set_connected(src, obj); + } + } + + if (_traversal_set->is_in((HeapWord*) obj) && !_heap->is_marked_next(obj) && _heap->mark_next(obj)) { bool succeeded = queue->push(ShenandoahMarkTask(obj)); assert(succeeded, "must succeed to push to task queue"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.hpp @@ -87,8 +87,6 @@ return type == VM_Operation::VMOp_ShenandoahInitMark || type == VM_Operation::VMOp_ShenandoahFinalMarkStartEvac || type == VM_Operation::VMOp_ShenandoahFinalEvac || - type == VM_Operation::VMOp_ShenandoahInitPartialGC || - type == VM_Operation::VMOp_ShenandoahFinalPartialGC || type == VM_Operation::VMOp_ShenandoahInitTraversalGC || type == VM_Operation::VMOp_ShenandoahFinalTraversalGC || type == VM_Operation::VMOp_ShenandoahInitUpdateRefs || diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -813,30 +813,6 @@ ); } -void ShenandoahVerifier::verify_before_partial() { - verify_at_safepoint( - "Before Partial", - _verify_forwarded_none, // cannot have forwarded objects - _verify_marked_complete, // bitmaps might be stale, but 
alloc-after-mark should be well - _verify_matrix_conservative, // matrix is conservatively consistent - _verify_cset_none, // no cset references before partial - _verify_liveness_disable, // no reliable liveness data anymore - _verify_regions_notrash_nocset // no trash and no cset regions - ); -} - -void ShenandoahVerifier::verify_after_partial() { - verify_at_safepoint( - "After Partial", - _verify_forwarded_none, // cannot have forwarded objects - _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well - _verify_matrix_conservative, // matrix is conservatively consistent - _verify_cset_none, // no cset references left after partial - _verify_liveness_disable, // no reliable liveness data anymore - _verify_regions_nocset // no cset regions, trash regions allowed - ); -} - void ShenandoahVerifier::verify_before_traversal() { verify_at_safepoint( "Before Traversal", @@ -853,7 +829,7 @@ verify_at_safepoint( "After Traversal", _verify_forwarded_none, // cannot have forwarded objects - _verify_marked_next, // marking should be complete in next bitmap + _verify_marked_disable, // We only have partial marking info after traversal _verify_matrix_disable, // matrix is conservatively consistent _verify_cset_none, // no cset references left after traversal _verify_liveness_disable, // liveness data is not collected for new allocations diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -183,8 +183,6 @@ void verify_after_updaterefs(); void verify_before_fullgc(); void verify_after_fullgc(); - void verify_before_partial(); - void verify_after_partial(); void verify_before_traversal(); void verify_after_traversal(); void verify_after_degenerated(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp 
b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp --- a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.cpp @@ -32,8 +32,6 @@ uint ShenandoahWorkerPolicy::_prev_conc_evac = 0; uint ShenandoahWorkerPolicy::_prev_fullgc = 0; uint ShenandoahWorkerPolicy::_prev_degengc = 0; -uint ShenandoahWorkerPolicy::_prev_stw_partial = 0; -uint ShenandoahWorkerPolicy::_prev_conc_partial = 0; uint ShenandoahWorkerPolicy::_prev_stw_traversal = 0; uint ShenandoahWorkerPolicy::_prev_conc_traversal = 0; uint ShenandoahWorkerPolicy::_prev_conc_update_ref = 0; @@ -94,26 +92,6 @@ return _prev_degengc; } -// Calculate workers for Stop-the-world partial GC -uint ShenandoahWorkerPolicy::calc_workers_for_stw_partial() { - uint active_workers = (_prev_stw_partial == 0) ? ParallelGCThreads : _prev_stw_partial; - _prev_stw_partial = - AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads, - active_workers, - Threads::number_of_non_daemon_threads()); - return _prev_stw_partial; -} - -// Calculate workers for concurent partial GC -uint ShenandoahWorkerPolicy::calc_workers_for_conc_partial() { - uint active_workers = (_prev_conc_partial == 0) ? ConcGCThreads : _prev_conc_partial; - _prev_conc_partial = - AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads, - active_workers, - Threads::number_of_non_daemon_threads()); - return _prev_conc_partial; -} - // Calculate workers for Stop-the-world traversal GC uint ShenandoahWorkerPolicy::calc_workers_for_stw_traversal() { uint active_workers = (_prev_stw_traversal == 0) ? 
ParallelGCThreads : _prev_stw_traversal; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahWorkerPolicy.hpp @@ -35,8 +35,6 @@ static uint _prev_conc_evac; static uint _prev_fullgc; static uint _prev_degengc; - static uint _prev_stw_partial; - static uint _prev_conc_partial; static uint _prev_stw_traversal; static uint _prev_conc_traversal; static uint _prev_conc_update_ref; @@ -62,12 +60,6 @@ // Calculate workers for parallel degenerated gc static uint calc_workers_for_stw_degenerated(); - // Calculate workers for Stop-the-world partial GC - static uint calc_workers_for_stw_partial(); - - // Calculate workers for concurrent partial GC - static uint calc_workers_for_conc_partial(); - // Calculate workers for Stop-the-world traversal GC static uint calc_workers_for_stw_traversal(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -304,10 +304,6 @@ diagnostic(bool, ShenandoahSATBBarrier, true, \ "Turn on/off SATB barriers in Shenandoah") \ \ - diagnostic(bool, ShenandoahConditionalSATBBarrier, false, \ - "Generate additional conc-mark-in-progress checks around SATB" \ - " barrier") \ - \ diagnostic(bool, ShenandoahKeepAliveBarrier, true, \ "Turn on/off keep alive barriers in Shenandoah") \ \ @@ -323,12 +319,6 @@ diagnostic(bool, ShenandoahStoreValEnqueueBarrier, false, \ "Turn on/off enqueuing of oops for storeval barriers") \ \ - diagnostic(bool, ShenandoahMWF, false, \ - "Turn on/off enqueuing of oops after write barriers (MWF)") \ - \ - diagnostic(bool, ShenandoahStoreValWriteBarrier, false, \ - "Turn on/off store val write barriers in Shenandoah") \ - \ diagnostic(bool, 
ShenandoahStoreValReadBarrier, true, \ "Turn on/off store val read barriers in Shenandoah") \ \ @@ -438,6 +428,9 @@ diagnostic(bool, ShenandoahAllowMixedAllocs, true, \ "Allow mixing mutator and collector allocations in a single " \ "region") \ + \ + diagnostic(bool, ShenandoahRecycleClearsBitmap, false, \ + "Recycling a region also clears the marking bitmap") \ #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_specialized_oop_closures.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_specialized_oop_closures.hpp --- a/src/hotspot/share/gc/shenandoah/shenandoah_specialized_oop_closures.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_specialized_oop_closures.hpp @@ -34,7 +34,6 @@ class ShenandoahMarkRefsMetadataDedupClosure; class ShenandoahUpdateHeapRefsClosure; class ShenandoahUpdateHeapRefsMatrixClosure; -class ShenandoahPartialEvacuateUpdateHeapClosure; class ShenandoahTraversalClosure; class ShenandoahTraversalMetadataClosure; class ShenandoahTraversalDedupClosure; @@ -49,7 +48,6 @@ f(ShenandoahUpdateHeapRefsMatrixClosure,_nv) \ f(ShenandoahTraversalClosure,_nv) \ f(ShenandoahTraversalMetadataClosure,_nv) \ - f(ShenandoahPartialEvacuateUpdateHeapClosure,_nv) \ f(ShenandoahMarkUpdateRefsDedupClosure,_nv) \ f(ShenandoahMarkUpdateRefsMetadataDedupClosure,_nv) \ f(ShenandoahMarkRefsDedupClosure,_nv) \ diff --git a/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.cpp b/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.cpp --- a/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.cpp +++ b/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.cpp @@ -28,7 +28,6 @@ #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahMarkCompact.hpp" -#include "gc/shenandoah/shenandoahPartialGC.hpp" #include "gc/shenandoah/shenandoahTraversalGC.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include 
"gc/shenandoah/shenandoahVerifier.hpp" @@ -73,16 +72,6 @@ ShenandoahHeap::heap()->entry_degenerated(_point); } -void VM_ShenandoahInitPartialGC::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::MINOR); - ShenandoahHeap::heap()->entry_init_partial(); -} - -void VM_ShenandoahFinalPartialGC::doit() { - ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::MINOR); - ShenandoahHeap::heap()->entry_final_partial(); -} - void VM_ShenandoahInitTraversalGC::doit() { ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::OTHER); ShenandoahHeap::heap()->entry_init_traversal(); diff --git a/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.hpp b/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.hpp --- a/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.hpp +++ b/src/hotspot/share/gc/shenandoah/vm_operations_shenandoah.hpp @@ -37,8 +37,6 @@ // - VM_ShenandoahInitUpdateRefs: initiate update references // - VM_ShenandoahFinalUpdateRefs: finish up update references // - VM_ShenandoahFullGC: do full GC -// - VM_ShenandoahInitPartialGC: init partial GC -// - VM_ShenandoahFinalPartialGC: finish partial GC // - VM_ShenandoahInitTraversalGC: init traversal GC // - VM_ShenandoahFinalTraversalGC: finish traversal GC @@ -117,22 +115,6 @@ bool marks_nmethods() { return false; } }; -class VM_ShenandoahInitPartialGC: public VM_ShenandoahOperation { -public: - VM_ShenandoahInitPartialGC() : VM_ShenandoahOperation() {}; - VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitPartialGC; } - const char* name() const { return "Shenandoah Init Partial Collection"; } - virtual void doit(); -}; - -class VM_ShenandoahFinalPartialGC: public VM_ShenandoahOperation { -public: - VM_ShenandoahFinalPartialGC() : VM_ShenandoahOperation() {}; - VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalPartialGC; } - const char* name() const { return "Shenandoah Final Partial Collection"; } - virtual void doit(); -}; - class VM_ShenandoahInitTraversalGC: public 
VM_ShenandoahOperation { public: VM_ShenandoahInitTraversalGC() : VM_ShenandoahOperation() {}; diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -4098,7 +4098,7 @@ } static void g1_write_barrier_pre_helper(const GraphKit& kit, Node* adr) { - if (UseShenandoahGC && (ShenandoahSATBBarrier || ShenandoahConditionalSATBBarrier) && adr != NULL) { + if (UseShenandoahGC && ShenandoahSATBBarrier && adr != NULL) { Node* c = kit.control(); Node* call = c->in(1)->in(1)->in(1)->in(0); assert(call->is_g1_wb_pre_call(), "g1_wb_pre call expected"); @@ -4336,39 +4336,6 @@ shenandoah_update_matrix(adr, val); } - if (ShenandoahConditionalSATBBarrier) { - enum { _set_path = 1, _not_set_path, PATH_LIMIT }; - RegionNode* region = new RegionNode(PATH_LIMIT); - Node* prev_mem = memory(Compile::AliasIdxRaw); - Node* memphi = PhiNode::make(region, prev_mem, Type::MEMORY, TypeRawPtr::BOTTOM); - - Node* gc_state_addr_p = _gvn.transform(new CastX2PNode(MakeConX((intptr_t) ShenandoahHeap::gc_state_addr()))); - Node* gc_state_addr = _gvn.transform(new AddPNode(top(), gc_state_addr_p, MakeConX(0))); - Node* gc_state = _gvn.transform(LoadNode::make(_gvn, control(), memory(Compile::AliasIdxRaw), gc_state_addr, TypeRawPtr::BOTTOM, TypeInt::INT, T_BYTE, MemNode::unordered)); - Node* add_set = _gvn.transform(new AddINode(gc_state, intcon(ShenandoahHeap::MARKING))); - Node* cmp_set = _gvn.transform(new CmpINode(add_set, intcon(0))); - Node* cmp_set_bool = _gvn.transform(new BoolNode(cmp_set, BoolTest::eq)); - IfNode* cmp_iff = create_and_map_if(control(), cmp_set_bool, PROB_MIN, COUNT_UNKNOWN); - Node* if_not_set = _gvn.transform(new IfTrueNode(cmp_iff)); - Node* if_set = _gvn.transform(new IfFalseNode(cmp_iff)); - - // Conc-mark not in progress. Skip SATB barrier. 
- set_control(if_not_set); - region->init_req(_not_set_path, control()); - memphi->init_req(_not_set_path, prev_mem); - - // Conc-mark in progress. Do the SATB barrier. - set_control(if_set); - g1_write_barrier_pre(do_load, obj, adr, alias_idx, val, val_type, pre_val, bt); - region->init_req(_set_path, control()); - memphi->init_req(_set_path, memory(Compile::AliasIdxRaw)); - - // Merge control flow and memory. - set_control(_gvn.transform(region)); - record_for_igvn(region); - set_memory(_gvn.transform(memphi), Compile::AliasIdxRaw); - - } if (ShenandoahSATBBarrier) { g1_write_barrier_pre(do_load, obj, adr, alias_idx, val, val_type, pre_val, bt); } @@ -4888,10 +4855,8 @@ Node* GraphKit::shenandoah_storeval_barrier(Node* obj) { if (UseShenandoahGC) { - if (ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahStoreValEnqueueBarrier) { obj = shenandoah_write_barrier(obj); - } - if (ShenandoahStoreValEnqueueBarrier && !ShenandoahMWF) { shenandoah_enqueue_barrier(obj); } if (ShenandoahStoreValReadBarrier) { @@ -4967,11 +4932,7 @@ Node* GraphKit::shenandoah_write_barrier(Node* obj) { if (UseShenandoahGC && ShenandoahWriteBarrier) { - obj = shenandoah_write_barrier_impl(obj); - if (ShenandoahStoreValEnqueueBarrier && ShenandoahMWF) { - shenandoah_enqueue_barrier(obj); - } - return obj; + return shenandoah_write_barrier_impl(obj); } else { return obj; } diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -2316,7 +2316,7 @@ // runtime filters that guard the pre-barrier code. // Also add memory barrier for non volatile load from the referent field // to prevent commoning of loads across safepoint. 
- if (!(UseG1GC || (UseShenandoahGC && (ShenandoahSATBBarrier || ShenandoahConditionalSATBBarrier))) && !need_mem_bar) + if (!(UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) && !need_mem_bar) return; // Some compile time checks. diff --git a/src/hotspot/share/opto/shenandoahSupport.cpp b/src/hotspot/share/opto/shenandoahSupport.cpp --- a/src/hotspot/share/opto/shenandoahSupport.cpp +++ b/src/hotspot/share/opto/shenandoahSupport.cpp @@ -581,7 +581,7 @@ return false; } in2 = in1->in(2); - if (in2->find_int_con(-1) != (ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL)) { + if (in2->find_int_con(-1) != (ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL)) { return false; } in1 = in1->in(1); @@ -3608,7 +3608,7 @@ Node* gc_state = new LoadUBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered); phase->register_new_node(gc_state, ctrl); - Node* evacuation_in_progress = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::EVACUATION | ShenandoahHeap::PARTIAL | ShenandoahHeap::TRAVERSAL)); + Node* evacuation_in_progress = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL)); phase->register_new_node(evacuation_in_progress, ctrl); Node* evacuation_in_progress_cmp = new CmpINode(evacuation_in_progress, phase->igvn().zerocon(T_INT)); phase->register_new_node(evacuation_in_progress_cmp, ctrl); diff --git a/src/hotspot/share/opto/shenandoahSupport.hpp b/src/hotspot/share/opto/shenandoahSupport.hpp --- a/src/hotspot/share/opto/shenandoahSupport.hpp +++ b/src/hotspot/share/opto/shenandoahSupport.hpp @@ -139,15 +139,13 @@ ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj) : ShenandoahBarrierNode(ctrl, mem, obj, true) { assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || - ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || - ShenandoahAcmpBarrier), + ShenandoahWriteBarrier || ShenandoahAcmpBarrier), 
"should be enabled"); } ShenandoahReadBarrierNode(Node* ctrl, Node* mem, Node* obj, bool allow_fromspace) : ShenandoahBarrierNode(ctrl, mem, obj, allow_fromspace) { assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || - ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || - ShenandoahAcmpBarrier), + ShenandoahWriteBarrier || ShenandoahAcmpBarrier), "should be enabled"); } @@ -172,7 +170,7 @@ public: ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj) : ShenandoahBarrierNode(ctrl, mem, obj, false) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier), "should be enabled"); + assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled"); C->add_shenandoah_barrier(this); } @@ -241,7 +239,7 @@ public: enum {SWBMEMPROJCON = (uint)-3}; ShenandoahWBMemProjNode(Node *src) : ProjNode( src, SWBMEMPROJCON) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier), "should be enabled"); + assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled"); assert(src->Opcode() == Op_ShenandoahWriteBarrier || src->is_Mach(), "epxect wb"); } virtual Node* Identity(PhaseGVN* phase); diff --git a/src/hotspot/share/runtime/vm_operations.hpp b/src/hotspot/share/runtime/vm_operations.hpp --- a/src/hotspot/share/runtime/vm_operations.hpp +++ b/src/hotspot/share/runtime/vm_operations.hpp @@ -102,8 +102,6 @@ template(ShenandoahInitMark) \ template(ShenandoahFinalMarkStartEvac) \ template(ShenandoahFinalEvac) \ - template(ShenandoahInitPartialGC) \ - template(ShenandoahFinalPartialGC) \ template(ShenandoahInitTraversalGC) \ template(ShenandoahFinalTraversalGC) \ template(ShenandoahInitUpdateRefs) \ diff --git a/src/hotspot/share/utilities/bitMap.cpp b/src/hotspot/share/utilities/bitMap.cpp --- a/src/hotspot/share/utilities/bitMap.cpp +++ b/src/hotspot/share/utilities/bitMap.cpp @@ -672,6 +672,28 @@ prefix, p2i(map()), p2i((char*)map() + (size() >> 
LogBitsPerByte))); } +void BitMap::copy_from(BitMap& other, idx_t start_bit, idx_t end_bit) { + // Copy prefix. + while (bit_in_word(start_bit) != 0 && start_bit < end_bit) { + tty->print_cr("prefix: "SIZE_FORMAT, start_bit); + at_put(start_bit, other.at(start_bit)); + start_bit++; + } + // Copy suffix. + while (bit_in_word(end_bit) != 0 && end_bit > start_bit) { + end_bit--; + at_put(end_bit, other.at(end_bit)); + tty->print_cr("suffix: "SIZE_FORMAT, end_bit); + } + + assert(bit_in_word(start_bit) == 0, "can only handle aligned copy for now, bit: "SIZE_FORMAT, bit_in_word(start_bit)); + assert(bit_in_word(end_bit) == 0, "can only handle aligned copy for now, bit: "SIZE_FORMAT, bit_in_word(end_bit)); + + idx_t start_word = word_index(start_bit); + idx_t end_word = word_index(end_bit); + Copy::conjoint_jbytes(other._map + start_word, _map + start_word, (end_word - start_word) * sizeof(bm_word_t)); +} + #ifndef PRODUCT void BitMap::print_on(outputStream* st) const { diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp --- a/src/hotspot/share/utilities/bitMap.hpp +++ b/src/hotspot/share/utilities/bitMap.hpp @@ -236,6 +236,9 @@ void clear_large(); inline void clear(); + // Copying + void copy_from(BitMap& other, size_t start, size_t end); + // Iteration support. Returns "true" if the iteration completed, false // if the iteration terminated early (because the closure "blk" returned // false). diff --git a/test/hotspot/gtest/utilities/test_bitMap_copy.cpp b/test/hotspot/gtest/utilities/test_bitMap_copy.cpp new file mode 100644 --- /dev/null +++ b/test/hotspot/gtest/utilities/test_bitMap_copy.cpp @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "memory/resourceArea.hpp" +#include "utilities/bitMap.inline.hpp" +#include "unittest.hpp" + +class BitMapCopyTest { + + template + static void fillBitMap(ResizableBitMapClass& map) { + map.set_bit(0); + map.set_bit(1); + map.set_bit(3); + map.set_bit(17); + map.set_bit(512); + } + +public: + static void testcopy0() { + BitMap::idx_t size = 1024; + ResourceMark rm; + ResourceBitMap map1(size); + fillBitMap(map1); + + ResourceBitMap map2(size); + map2.copy_from(map1, 0, 0); + EXPECT_TRUE(map2.is_empty()); + EXPECT_TRUE(map2.count_one_bits() == 0); + } + + static void testcopy1() { + BitMap::idx_t size = 1024; + ResourceMark rm; + ResourceBitMap map1(size); + fillBitMap(map1); + + ResourceBitMap map2(size); + map2.copy_from(map1, 0, 1); + EXPECT_TRUE(map2.at(0)); + EXPECT_TRUE(map2.count_one_bits() == 1); + + } + + static void testcopy4() { + BitMap::idx_t size = 1024; + ResourceMark rm; + ResourceBitMap map1(size); + map1.set_range(0, 1024); + + ResourceBitMap map2(size); + map2.copy_from(map1, 6, 10); + EXPECT_FALSE(map2.at(5)); + EXPECT_TRUE(map2.at(6)); + EXPECT_TRUE(map2.at(7)); + EXPECT_TRUE(map2.at(8)); + EXPECT_TRUE(map2.at(9)); + EXPECT_FALSE(map2.at(10)); + EXPECT_TRUE(map2.count_one_bits() == 4); 
+ + } + + static void testcopy8() { + BitMap::idx_t size = 1024; + ResourceMark rm; + ResourceBitMap map1(size); + map1.set_range(0, 1024); + + ResourceBitMap map2(size); + map2.copy_from(map1, 0, 8); + EXPECT_TRUE(map2.at(0)); + EXPECT_TRUE(map2.at(1)); + EXPECT_TRUE(map2.at(2)); + EXPECT_TRUE(map2.at(3)); + EXPECT_TRUE(map2.at(4)); + EXPECT_TRUE(map2.at(5)); + EXPECT_TRUE(map2.at(6)); + EXPECT_TRUE(map2.at(7)); + EXPECT_FALSE(map2.at(8)); + EXPECT_TRUE(map2.count_one_bits() == 8); + + } + + static void testcopy100() { + BitMap::idx_t size = 1024; + ResourceMark rm; + ResourceBitMap map1(size); + map1.set_range(0, 1024); + + ResourceBitMap map2(size); + map2.copy_from(map1, 48, 148); + EXPECT_FALSE(map2.at(47)); + EXPECT_TRUE(map2.at(48)); + EXPECT_TRUE(map2.at(147)); + EXPECT_FALSE(map2.at(148)); + EXPECT_TRUE(map2.count_one_bits() == 100); + + } + + static void testcopyall() { + BitMap::idx_t size = 1024; + ResourceMark rm; + ResourceBitMap map1(size); + fillBitMap(map1); + + ResourceBitMap map2(size); + map2.set_range(0, 512); + map2.copy_from(map1, 0, 1024); + EXPECT_TRUE(map2.at(0)); + EXPECT_TRUE(map2.at(1)); + EXPECT_TRUE(map2.at(3)); + EXPECT_TRUE(map2.at(17)); + EXPECT_TRUE(map2.at(512)); + EXPECT_TRUE(map2.count_one_bits() == 5); + + } + +}; + +TEST_VM(BitMap, copy0) { + BitMapCopyTest::testcopy0(); +} + +TEST_VM(BitMap, copy1) { + BitMapCopyTest::testcopy1(); +} + +TEST_VM(BitMap, copy4) { + BitMapCopyTest::testcopy4(); +} + +TEST_VM(BitMap, copy8) { + BitMapCopyTest::testcopy8(); +} + +TEST_VM(BitMap, copy100) { + BitMapCopyTest::testcopy100(); +} + +TEST_VM(BitMap, copyall) { + BitMapCopyTest::testcopyall(); +} diff --git a/test/hotspot/jtreg/gc/shenandoah/TestSelectiveBarrierFlags.java b/test/hotspot/jtreg/gc/shenandoah/TestSelectiveBarrierFlags.java --- a/test/hotspot/jtreg/gc/shenandoah/TestSelectiveBarrierFlags.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestSelectiveBarrierFlags.java @@ -39,11 +39,11 @@ public static void main(String[] args) 
throws Exception { String[][] opts = { - new String[]{ "ShenandoahSATBBarrier", "ShenandoahConditionalSATBBarrier" }, + new String[]{ "ShenandoahSATBBarrier" }, new String[]{ "ShenandoahKeepAliveBarrier" }, new String[]{ "ShenandoahWriteBarrier" }, new String[]{ "ShenandoahReadBarrier" }, - new String[]{ "ShenandoahStoreValWriteBarrier", "ShenandoahStoreValReadBarrier", "ShenandoahStoreValEnqueueBarrier" }, + new String[]{ "ShenandoahStoreValReadBarrier", "ShenandoahStoreValEnqueueBarrier" }, new String[]{ "ShenandoahCASBarrier" }, new String[]{ "ShenandoahAcmpBarrier" }, new String[]{ "ShenandoahCloneBarrier" },