# HG changeset patch
# User rkennke
# Date 1489429383 -3600
#      Mon Mar 13 19:23:03 2017 +0100
# Node ID 39e89fa3843c0912a120e007974526801627e6e3
# Parent  cba65b85b10ce1286d0c2683f28147c5e4aab259
[mq]: partial.patch

diff --git a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp
+++ b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp
@@ -26,6 +26,7 @@
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahPartialGC.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimes.hpp"
 #include "runtime/os.hpp"
 
@@ -57,6 +58,8 @@
   size_t _bytes_allocated_start_CM;
   size_t _bytes_allocated_during_CM;
 
+  size_t _bytes_allocated_after_last_gc;
+
   uint _cancelled_cm_cycles_in_a_row;
   uint _successful_cm_cycles_in_a_row;
 
@@ -72,6 +75,14 @@
   void record_bytes_start_CM(size_t bytes);
   void record_bytes_end_CM(size_t bytes);
 
+  void record_gc_start() {
+    // Do nothing.
+  }
+
+  void record_gc_end() {
+    _bytes_allocated_after_last_gc = ShenandoahHeap::heap()->used();
+  }
+
   size_t bytes_in_cset() const { return _bytes_in_cset; }
 
   virtual void print_thresholds() {
@@ -79,6 +90,10 @@
 
   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
 
+  virtual bool should_start_partial_gc() {
+    return false;
+  }
+
   virtual bool handover_cancelled_marking() {
     return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
   }
@@ -134,6 +149,7 @@
   _bytes_reclaimed_this_cycle(0),
   _bytes_allocated_start_CM(0),
   _bytes_allocated_during_CM(0),
+  _bytes_allocated_after_last_gc(0),
   _bytes_in_cset(0),
   _cancelled_cm_cycles_in_a_row(0),
   _successful_cm_cycles_in_a_row(0),
@@ -274,6 +290,14 @@
   _timing_data[phase]._secs.add(elapsed);
 }
 
+void ShenandoahCollectorPolicy::record_gc_start() {
+  _heuristics->record_gc_start();
+}
+
+void ShenandoahCollectorPolicy::record_gc_end() {
+  _heuristics->record_gc_end();
+}
+
 void ShenandoahCollectorPolicy::report_concgc_cancelled() {
 }
 
@@ -632,6 +656,32 @@
     return false;
   }
 };
+class PartialHeuristics : public AdaptiveHeuristics {
+public:
+  PartialHeuristics() : AdaptiveHeuristics() {
+    if (FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) {
+      FLAG_SET_DEFAULT(ShenandoahAllocationThreshold, 5);
+    }
+    FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
+    // TODO: Disable this optimization for now, as it also requires the matrix barriers.
+    FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
+  }
+
+  virtual ~PartialHeuristics() {}
+
+  bool should_start_concurrent_mark(size_t used, size_t capacity) const {
+    // Never do concurrent GCs.
+    return false;
+  }
+
+  bool should_start_partial_gc() {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t capacity = heap->capacity();
+
+    size_t used = heap->used();
+    return (used - _bytes_allocated_after_last_gc) * 100 / capacity > ShenandoahAllocationThreshold;
+  }
+};
 
 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
   _cycle_counter(0),
@@ -746,6 +796,9 @@
     } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
       log_info(gc, init)("Shenandoah heuristics: connections");
       _heuristics = new ConnectionHeuristics();
+    } else if (strcmp(ShenandoahGCHeuristics, "partial") == 0) {
+      log_info(gc, init)("Shenandoah heuristics: partial GC");
+      _heuristics = new PartialHeuristics();
     } else {
       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
     }
@@ -1065,3 +1118,6 @@
                                        application_workers, 0, active_workers_by_liveset);
 }
 
+bool ShenandoahCollectorPolicy::should_start_partial_gc() {
+  return _heuristics->should_start_partial_gc();
+}
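
The partial heuristics above triggers a partial cycle purely off allocation progress: once the bytes allocated since the end of the last GC exceed ShenandoahAllocationThreshold percent of heap capacity (defaulted to 5 by this heuristics), should_start_partial_gc() returns true and the concurrent thread schedules the partial VM operation. A minimal standalone sketch of that check (not part of the patch; the parameter names are stand-ins for the values the heuristics reads from ShenandoahHeap):

    // Sketch only: the trigger condition implemented by
    // PartialHeuristics::should_start_partial_gc() above.
    // 'used_after_last_gc' corresponds to _bytes_allocated_after_last_gc,
    // 'threshold_percent' to ShenandoahAllocationThreshold.
    bool should_start_partial_gc(size_t used, size_t used_after_last_gc,
                                 size_t capacity, size_t threshold_percent) {
      size_t allocated_since_last_gc = used - used_after_last_gc;
      return allocated_since_last_gc * 100 / capacity > threshold_percent;
    }
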
diff --git a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp
--- a/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp
+++ b/src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.hpp
@@ -165,6 +165,9 @@
 
   void post_heap_initialize();
 
+  void record_gc_start();
+  void record_gc_end();
+
   void record_phase_start(TimingPhase phase);
   void record_phase_end(TimingPhase phase);
 
@@ -181,6 +184,7 @@
   void record_bytes_start_CM(size_t bytes);
   void record_bytes_end_CM(size_t bytes);
   bool should_start_concurrent_mark(size_t used, size_t capacity);
+  bool should_start_partial_gc();
   bool handover_cancelled_marking();
 
   void record_cm_cancelled();
diff --git a/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.cpp
@@ -53,6 +53,8 @@
       break;
     } else if (is_full_gc()) {
       service_fullgc_cycle();
+    } else if (heap->shenandoahPolicy()->should_start_partial_gc()) {
+      service_partial_cycle();
    } else if (heap->shenandoahPolicy()->should_start_concurrent_mark(heap->used(), heap->capacity())) {
       service_normal_cycle();
       if (heap->is_evacuation_in_progress()) {
@@ -73,6 +75,12 @@
   }
 }
 
+void ShenandoahConcurrentThread::service_partial_cycle() {
+  GCIdMark gc_id_mark;
+  VM_ShenandoahPartialGC partial_gc;
+  VMThread::execute(&partial_gc);
+}
+
 void ShenandoahConcurrentThread::service_normal_cycle() {
   if (check_cancellation()) return;
 
diff --git a/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.hpp b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.hpp
--- a/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.hpp
+++ b/src/share/vm/gc/shenandoah/shenandoahConcurrentThread.hpp
@@ -52,6 +52,7 @@
   bool check_cancellation();
   void service_normal_cycle();
   void service_fullgc_cycle();
+  void service_partial_cycle();
 
 public:
   // Constructor
diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp
@@ -41,6 +41,7 @@
 #include "gc/shenandoah/shenandoahHumongous.hpp"
 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahPartialGC.hpp"
 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
 
@@ -264,6 +265,7 @@
   _next_mark_bit_map = &_mark_bit_map1;
 
   _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
+  _partial_gc = new ShenandoahPartialGC(this, _max_regions);
 
   _monitoring_support = new ShenandoahMonitoringSupport(this);
 
@@ -2527,6 +2529,18 @@
   return _connection_matrix;
 }
 
+ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
+  return _partial_gc;
+}
+
+void ShenandoahHeap::do_partial_collection() {
+  {
+    ShenandoahHeapLock lock(this);
+    partial_gc()->prepare();
+  }
+  partial_gc()->do_partial_collection();
+}
+
 #ifdef ASSERT
 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
   assert(_heap_lock == locked, "must be locked");
diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp
@@ -37,6 +37,7 @@
 class ShenandoahCollectionSet;
 class ShenandoahFreeSet;
 class ShenandoahConcurrentMark;
+class ShenandoahPartialGC;
 class ShenandoahConcurrentThread;
 class ShenandoahMonitoringSupport;
 
@@ -144,8 +145,7 @@
   ShenandoahHeapRegion* _currentAllocationRegion;
 
   ShenandoahConcurrentMark* _scm;
-
-
+  ShenandoahPartialGC* _partial_gc;
 
   ShenandoahConcurrentThread* _concurrent_gc_thread;
 
@@ -299,6 +299,7 @@
   void prepare_for_concurrent_evacuation();
   void evacuate_and_update_roots();
 
+  void do_partial_collection();
   void verify_matrix();
 private:
   void set_evacuation_in_progress(bool in_progress);
@@ -342,6 +343,7 @@
   ShenandoahHeapRegionSet* regions() { return _ordered_regions;}
 
   ShenandoahFreeSet* free_regions();
+  ShenandoahCollectionSet* collection_set() { return _collection_set; }
 
   void clear_free_regions();
   void add_free_region(ShenandoahHeapRegion* r);
@@ -392,6 +394,7 @@
   ShenandoahMonitoringSupport* monitoring_support();
 
   ShenandoahConcurrentMark* concurrentMark() { return _scm;}
+  ShenandoahPartialGC* partial_gc();
 
   ReferenceProcessor* ref_processor() { return _ref_processor;}
 
@@ -405,6 +408,7 @@
   void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions = false, bool skip_humongous_continuation = false) const;
 
   void verify_heap_after_evacuation();
+  void verify_heap_after_marking();
 
   // Delete entries for dead interned string and clean up unreferenced symbols
   // in symbol table, possibly in parallel.
@@ -448,9 +452,11 @@
 
   void parallel_evacuate();
 
+public:
   inline oop atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c);
   inline oop atomic_compare_exchange_oop(oop n, oop* addr, oop c);
 
+private:
   void evacuate_region(ShenandoahHeapRegion* from_region, ShenandoahHeapRegion* to_region);
 
 #ifdef ASSERT
@@ -460,14 +466,13 @@
   inline void copy_object(oop p, HeapWord* s, size_t words);
   void verify_copy(oop p, oop c);
   void verify_heap_size_consistency();
-  void verify_heap_after_marking();
+private:
   void verify_heap_after_update_refs();
   void verify_regions_after_update_refs();
 
   void ref_processing_init();
 
   GCTracer* tracer();
-  ShenandoahCollectionSet* collection_set() { return _collection_set; }
   bool call_from_write_barrier(bool evacuating);
   void grow_heap_by(size_t num_regions);
 
diff --git a/src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp b/src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -212,6 +212,17 @@
   return NULL; // all done
 }
 
+void ShenandoahHeapRegion::oop_iterate(ExtendedOopClosure* blk) {
+  if (is_empty()) return;
+  HeapWord* obj_addr = bottom() + BrooksPointer::word_size();
+  HeapWord* t = top();
+  // Could call object_iterate(), but this is easier.
+  while (obj_addr < t) {
+    oop obj = oop(obj_addr);
+    obj_addr += obj->oop_iterate_size(blk) + BrooksPointer::word_size();
+  }
+}
+
 void ShenandoahHeapRegion::fill_region() {
   ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
 
diff --git a/src/share/vm/gc/shenandoah/shenandoahHeapRegion.hpp b/src/share/vm/gc/shenandoah/shenandoahHeapRegion.hpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -89,6 +89,7 @@
 
   void object_iterate_interruptible(ObjectClosure* blk, bool allow_cancel);
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
+  void oop_iterate(ExtendedOopClosure* cl);
 
   HeapWord* block_start_const(const void* p) const;
 
diff --git a/src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.cpp b/src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.cpp
+++ b/src/share/vm/gc/shenandoah/shenandoahHeapRegionSet.cpp
@@ -184,7 +184,7 @@
 
 bool ShenandoahHeapRegionSet::contains(ShenandoahHeapRegion* r) {
   FindRegionClosure cl(r);
-  unclaimed_heap_region_iterate(&cl);
+  active_heap_region_iterate(&cl);
   return cl.result();
 }
 
diff --git a/src/share/vm/gc/shenandoah/shenandoahMarkCompact.cpp b/src/share/vm/gc/shenandoah/shenandoahMarkCompact.cpp
--- a/src/share/vm/gc/shenandoah/shenandoahMarkCompact.cpp
+++ b/src/share/vm/gc/shenandoah/shenandoahMarkCompact.cpp
@@ -227,6 +227,13 @@
   policy->record_phase_end(ShenandoahCollectorPolicy::full_gc);
 
   oopDesc::set_bs(old_bs);
+
+  if (UseShenandoahMatrix) {
+    if (PrintShenandoahMatrix) {
+      outputStream* log = Log(gc)::info_stream();
+      _heap->connection_matrix()->print_on(log);
+    }
+  }
 }
 
 #ifdef ASSERT
@@ -494,7 +501,7 @@
 class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
 private:
   ShenandoahHeap* _heap;
-
+  uint _from_idx;
 public:
   ShenandoahAdjustPointersClosure() :
     _heap(ShenandoahHeap::heap()) {
@@ -509,6 +516,13 @@
       assert(_heap->is_marked_complete(obj), "must be marked");
       oop forw = oop(BrooksPointer::get_raw(obj));
       oopDesc::encode_store_heap_oop(p, forw);
+      if (UseShenandoahMatrix) {
+        if (_heap->is_in_reserved(p)) {
+          assert(_heap->is_in_reserved(forw), "must be in heap");
+          uint to_idx = _heap->heap_region_index_containing(forw);
+          _heap->connection_matrix()->set_connected(_from_idx, to_idx, true);
+        }
+      }
     }
   }
 public:
@@ -518,19 +532,25 @@
   void do_oop(narrowOop* p) { do_oop_work(p); }
+  void set_from_idx(uint from_idx) {
+    _from_idx = from_idx;
+  }
 };
 
 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 private:
-  ShenandoahAdjustPointersClosure* _cl;
+  ShenandoahAdjustPointersClosure _cl;
   ShenandoahHeap* _heap;
 public:
-  ShenandoahAdjustPointersObjectClosure(ShenandoahAdjustPointersClosure* cl) :
-    _cl(cl), _heap(ShenandoahHeap::heap()) {
+  ShenandoahAdjustPointersObjectClosure() :
+    _heap(ShenandoahHeap::heap()) {
   }
   void do_object(oop p) {
     assert(_heap->is_marked_complete(p), "must be marked");
-    p->oop_iterate(_cl);
+    oop forw = oop(BrooksPointer::get_raw(p));
+    uint from_idx = _heap->heap_region_index_containing(forw);
+    _cl.set_from_idx(from_idx);
+    p->oop_iterate(&_cl);
   }
 };
 
@@ -547,8 +567,7 @@
   void work(uint worker_id) {
     ShenandoahHeap* heap = ShenandoahHeap::heap();
     ShenandoahHeapRegion* r = _regions->claim_next();
-    ShenandoahAdjustPointersClosure cl;
-    ShenandoahAdjustPointersObjectClosure obj_cl(&cl);
+    ShenandoahAdjustPointersObjectClosure obj_cl;
     while (r != NULL) {
       if (! r->is_humongous_continuation()) {
         heap->marked_object_iterate(r, &obj_cl);
@@ -585,6 +604,10 @@
   GCTraceTime(Info, gc, phases) time("Phase 2: Adjust pointers", _gc_timer);
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
+  if (UseShenandoahMatrix) {
+    heap->connection_matrix()->clear_all();
+  }
+
   // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
 
diff --git a/src/share/vm/gc/shenandoah/shenandoahPartialGC.cpp b/src/share/vm/gc/shenandoah/shenandoahPartialGC.cpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/gc/shenandoah/shenandoahPartialGC.cpp
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
+#include "gc/shenandoah/shenandoahCollectionSet.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahPartialGC.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+#include "memory/iterator.hpp"
+
+class PartialEvacuateUpdateRootsClosure : public OopClosure {
+  ShenandoahPartialGC* _partial_gc;
+  Thread* _thread;
+  SCMObjToScanQueue* _queue;
+private:
+  template <class T>
+  void do_oop_work(T* p) { _partial_gc->process_oop(p, _thread, _queue, false); }
+public:
+  PartialEvacuateUpdateRootsClosure(SCMObjToScanQueue* q) :
+    _partial_gc(ShenandoahHeap::heap()->partial_gc()),
+    _thread(Thread::current()), _queue(q) {}
+  void do_oop(oop* p) { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class PartialEvacuateUpdateHeapClosure : public ExtendedOopClosure {
+  ShenandoahPartialGC* _partial_gc;
+  Thread* _thread;
+  SCMObjToScanQueue* _queue;
+private:
+  template <class T>
+  void do_oop_work(T* p) {
+    _partial_gc->process_oop(p, _thread, _queue, true);
+  }
+public:
+  PartialEvacuateUpdateHeapClosure(SCMObjToScanQueue* q) :
+    _partial_gc(ShenandoahHeap::heap()->partial_gc()),
+    _thread(Thread::current()), _queue(q) {}
+  void do_oop(oop* p) { do_oop_work(p); }
+  void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class ShenandoahPartialCollectionTask : public AbstractGangTask {
+private:
+  ShenandoahRootProcessor* _rp;
+  ShenandoahHeapRegionSet* _root_regions;
+  ShenandoahHeap* _heap;
+public:
+  ShenandoahPartialCollectionTask(ShenandoahRootProcessor* rp,
+                                  ShenandoahHeapRegionSet* root_regions) :
+    AbstractGangTask("Shenandoah Partial Collection"),
+    _rp(rp), _root_regions(root_regions),
+    _heap(ShenandoahHeap::heap()) {}
+
+  void work(uint worker_id) {
+    SCMObjToScanQueueSet* queues = _heap->partial_gc()->task_queues();
+    SCMObjToScanQueue* q = queues->queue(worker_id);
+    {
+      // First process ordinary GC roots.
+      PartialEvacuateUpdateRootsClosure roots_cl(q);
+      CLDToOopClosure cld_cl(&roots_cl);
+      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
+      _rp->process_all_roots(&roots_cl, &roots_cl, &cld_cl, &code_cl, worker_id);
+    }
+    drain_queue(worker_id);
+    if (_heap->cancelled_concgc()) { q->set_empty(); return; }
+    {
+      // Then process root regions.
+      PartialEvacuateUpdateHeapClosure cl(q);
+      ShenandoahHeapRegion* r = _root_regions->claim_next();
+      while (r != NULL) {
+        r->oop_iterate(&cl);
+        drain_queue(worker_id);
+        if (_heap->cancelled_concgc()) { q->set_empty(); return; }
+        r = _root_regions->claim_next();
+      }
+    }
+  }
+
+  void drain_queue(uint worker_id) {
+    SCMObjToScanQueueSet* queues = _heap->partial_gc()->task_queues();
+    SCMObjToScanQueue* q = queues->queue(worker_id);
+    PartialEvacuateUpdateHeapClosure cl(q);
+    // Empty queue if necessary.
+    int seed = 17;
+    SCMTask task;
+    while ((q->pop_buffer(task) ||
+            q->pop_local(task) ||
+            q->pop_overflow(task)) &&
+           !_heap->cancelled_concgc()) {
+      oop obj = task.obj();
+      assert(! oopDesc::is_null(obj), "must not be null");
+      obj->oop_iterate(&cl);
+    }
+  }
+
+};
+
+ShenandoahPartialGC::ShenandoahPartialGC(ShenandoahHeap* heap, uint max_regions) :
+  _heap(heap),
+  _root_regions(new ShenandoahHeapRegionSet(max_regions)),
+  _task_queues(new SCMObjToScanQueueSet(heap->max_workers())) {
+
+  uint num_queues = heap->max_workers();
+  for (uint i = 0; i < num_queues; ++i) {
+    SCMObjToScanQueue* task_queue = new SCMObjToScanQueue();
+    task_queue->initialize();
+    _task_queues->register_queue(i, task_queue);
+  }
+
+}
+
+ShenandoahHeapRegionSet* ShenandoahPartialGC::root_regions() {
+  return _root_regions;
+}
+
+void ShenandoahPartialGC::prepare() {
+  _heap->collection_set()->clear();
+  assert(_heap->collection_set()->count() == 0, "collection set not clear");
+
+  _heap->ensure_parsability(true);
+
+  ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
+  ShenandoahHeapRegionSet* regions = _heap->regions();
+  ShenandoahCollectionSet* collection_set = _heap->collection_set();
+  ShenandoahFreeSet* free_set = _heap->free_regions();
+  free_set->clear();
+  _root_regions->clear();
+  assert(_root_regions->count() == 0, "must be cleared");
+  uint num_regions = _heap->num_regions();
+
+  // First pass: find collection set.
+  for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
+    ShenandoahHeapRegion* region = regions->get(to_idx);
+    if (region->is_humongous() || region->is_empty() || region->is_pinned()) continue;
+    assert(! _heap->region_in_collection_set(to_idx), "must not be in cset yet");
+    uint num_incoming = 0;
+    for (uint from_idx = 0; from_idx < num_regions; from_idx++) {
+      if (matrix->is_connected(from_idx, to_idx)) {
+        num_incoming++;
+      }
+    }
+    if (num_incoming < ShenandoahPartialInboundThreshold) {
+      collection_set->add_region(region);
+      _heap->set_region_in_collection_set(to_idx, true);
+    }
+  }
+  // Second pass: find all root regions.
+  for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
+    ShenandoahHeapRegion* region = regions->get(to_idx);
+    if (region->is_humongous() || region->is_empty() || region->is_pinned()) continue;
+    if (_heap->region_in_collection_set(to_idx)) {
+      for (uint from_idx = 0; from_idx < num_regions; from_idx++) {
+        if (matrix->is_connected(from_idx, to_idx)) {
+          ShenandoahHeapRegion* r = regions->get(from_idx);
+          if (! _root_regions->contains(r)) {
+            _root_regions->add_region(r);
+          }
+        }
+      }
+    }
+  }
+  // Final pass: free regions.
+  for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
+    ShenandoahHeapRegion* region = regions->get(to_idx);
+    if (! region->is_humongous() &&
+        ! region->is_pinned() &&
+        ! _root_regions->contains(region) &&
+        ! _heap->in_collection_set(region)) {
+
+      free_set->add_region(region);
+    }
+  }
+  log_debug(gc, ergo)("got "SIZE_FORMAT" cset regions", collection_set->count());
+  log_debug(gc, ergo)("got "SIZE_FORMAT" root regions", _root_regions->count());
+}
+
+void ShenandoahPartialGC::do_partial_collection() {
+
+  _heap->gc_timer()->register_gc_start();
+  {
+    GCTraceTime(Info, gc) time("Pause Partial", _heap->gc_timer(), GCCause::_no_gc, true);
+
+    COMPILER2_PRESENT(DerivedPointerTable::clear());
+
+    {
+      ClassLoaderDataGraph::clear_claimed_marks();
+      ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers());
+      ShenandoahPartialCollectionTask partial_task(&rp, _root_regions);
+      _heap->workers()->run_task(&partial_task);
+    }
+
+    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+
+    if (! _heap->cancelled_concgc()) {
+      ShenandoahHeap::ShenandoahHeapLock heap_lock(_heap);
+      uint num_cset = _heap->collection_set()->count();
+      for (uint i = 0; i < num_cset; i++) {
+        ShenandoahHeapRegion* r = _heap->collection_set()->get(i);
+        _heap->decrease_used(r->used());
+        r->recycle();
+        _heap->free_regions()->add_region(r);
+      }
+
+      reset();
+    }
+  }
+  _heap->gc_timer()->register_gc_end();
+}
+
+void ShenandoahPartialGC::reset() {
+  _heap->collection_set()->clear();
+  _heap->clear_cset_fast_test();
+  _root_regions->clear();
+}
+
+template <class T>
+void ShenandoahPartialGC::process_oop(T* p, Thread* thread, SCMObjToScanQueue* queue, bool update_matrix) {
+  T o = oopDesc::load_heap_oop(p);
+  if (! oopDesc::is_null(o)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(o);
+    if (_heap->in_collection_set(obj)) {
+      oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
+      if (oopDesc::unsafe_equals(obj, forw)) {
+        forw = _heap->evacuate_object(obj, thread);
+      }
+      assert(! oopDesc::unsafe_equals(obj, forw) || _heap->cancelled_concgc(), "must be evacuated");
+      // oopDesc::encode_store_heap_oop_not_null(p, forw);
+      // queue->push(forw);
+      // Can use simple store for GC roots.
+      if (oopDesc::unsafe_equals(obj, _heap->atomic_compare_exchange_oop(forw, p, obj))) {
+        if (update_matrix) {
+          uint from_idx = _heap->heap_region_index_containing(p);
+          uint to_idx = _heap->heap_region_index_containing(forw);
+          _heap->connection_matrix()->set_connected(from_idx, to_idx, true);
+        }
+        assert(forw->is_oop(), "sanity");
+        bool succeeded = queue->push(SCMTask(forw));
+        assert(succeeded, "must succeed to push to task queue");
+      }
+    }
+  }
+}
+
+bool ShenandoahPartialGC::is_in_root_region(oop obj) {
+  // TODO: make this very fast!!
+  ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
+  return _root_regions->contains(r);
+}
+
+SCMObjToScanQueueSet* ShenandoahPartialGC::task_queues() {
+  return _task_queues;
+}
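
prepare() above sizes the partial collection purely from the connection matrix: a region goes into the collection set when fewer than ShenandoahPartialInboundThreshold other regions have recorded references into it, every region with a recorded connection into the collection set becomes a root region that must be scanned, and the remaining non-humongous, non-pinned regions go back to the free set. A compact standalone sketch of that selection (not the patched code; a hypothetical bit-matrix stands in for ShenandoahConnectionMatrix, and the humongous/empty/pinned filtering is omitted):

    // Sketch only: the inbound-count selection performed by
    // ShenandoahPartialGC::prepare(). is_connected(from, to) means
    // "region 'from' has at least one recorded reference into region 'to'".
    #include <cstddef>
    #include <vector>

    struct Matrix {
      std::vector<bool> bits;
      std::size_t n;
      bool is_connected(std::size_t from, std::size_t to) const { return bits[from * n + to]; }
    };

    void select_regions(const Matrix& m, std::size_t num_regions, std::size_t inbound_threshold,
                        std::vector<bool>& in_cset, std::vector<bool>& is_root) {
      in_cset.assign(num_regions, false);
      is_root.assign(num_regions, false);
      // Pass 1: regions with few inbound regions are cheap to evacuate.
      for (std::size_t to = 0; to < num_regions; to++) {
        std::size_t num_incoming = 0;
        for (std::size_t from = 0; from < num_regions; from++) {
          if (m.is_connected(from, to)) num_incoming++;
        }
        if (num_incoming < inbound_threshold) in_cset[to] = true;
      }
      // Pass 2: any region pointing into the collection set must be scanned as a root.
      for (std::size_t to = 0; to < num_regions; to++) {
        if (!in_cset[to]) continue;
        for (std::size_t from = 0; from < num_regions; from++) {
          if (m.is_connected(from, to)) is_root[from] = true;
        }
      }
    }
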
diff --git a/src/share/vm/gc/shenandoah/shenandoahPartialGC.hpp b/src/share/vm/gc/shenandoah/shenandoahPartialGC.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/gc/shenandoah/shenandoahPartialGC.hpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_HPP
+#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/shenandoah/shenandoahTaskqueue.hpp"
+
+class Thread;
+class ShenandoahHeapRegionSet;
+class ShenandoahHeap;
+
+class ShenandoahPartialGC : public CHeapObj<mtGC> {
+private:
+  ShenandoahHeapRegionSet* _root_regions;
+  ShenandoahHeap* _heap;
+  SCMObjToScanQueueSet* _task_queues;
+
+public:
+  ShenandoahPartialGC(ShenandoahHeap* heap, uint max_regions);
+
+  void reset();
+
+  ShenandoahHeapRegionSet* root_regions();
+
+  void prepare();
+  void do_partial_collection();
+
+  bool is_in_root_region(oop obj);
+
+  template <class T>
+  void process_oop(T* p, Thread* thread, SCMObjToScanQueue* queue, bool update_matrix);
+
+  SCMObjToScanQueueSet* task_queues();
+};
+
+#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHPARTIALGC_HPP
diff --git a/src/share/vm/gc/shenandoah/shenandoah_globals.hpp b/src/share/vm/gc/shenandoah/shenandoah_globals.hpp
--- a/src/share/vm/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/share/vm/gc/shenandoah/shenandoah_globals.hpp
@@ -136,6 +136,12 @@
           "How many successful marking cycles before improving free "       \
           "threshold for adaptive heuristics")                              \
                                                                             \
+  experimental(uintx, ShenandoahPartialInboundThreshold, 10,                \
+          "Maximum number of inbound regions a region may have in order "   \
+          "to be considered for the collection set in partial collections.")\
+          range(0, 100)                                                     \
+          writeable(Always)                                                 \
+                                                                            \
   experimental(uint, ShenandoahMarkLoopStride, 1000,                        \
           "How many items are processed during one marking step")           \
                                                                             \
diff --git a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp
--- a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp
+++ b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.cpp
@@ -26,6 +26,7 @@
 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahPartialGC.hpp"
 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
 
@@ -60,13 +61,15 @@
 
 void VM_ShenandoahFullGC::doit() {
   GCIdMark gc_id_mark(_gc_id);
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->shenandoahPolicy()->record_gc_start();
   ShenandoahMarkCompact::do_mark_compact(_gc_cause);
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
   if (UseTLAB) {
     sh->shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::resize_tlabs);
     sh->resize_all_tlabs();
     sh->shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::resize_tlabs);
   }
+  sh->shenandoahPolicy()->record_gc_end();
 }
 
 bool VM_ShenandoahReferenceOperation::doit_prologue() {
@@ -84,10 +87,12 @@
 
 void VM_ShenandoahStartEvacuation::doit() {
   GCIdMark gc_id_mark(_gc_id);
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->shenandoahPolicy()->record_gc_start();
+
   // It is critical that we
   // evacuate roots right after finishing marking, so that we don't
   // get unmarked objects in the roots.
-  ShenandoahHeap *sh = ShenandoahHeap::heap();
   // Setup workers for final marking
   WorkGang* workers = sh->workers();
   uint n_workers = ShenandoahCollectorPolicy::calc_workers_for_final_marking(workers->active_workers(),
@@ -121,6 +126,16 @@
     sh->concurrentMark()->cancel();
     sh->stop_concurrent_marking();
   }
+
+  sh->shenandoahPolicy()->record_gc_end();
+}
+
+void VM_ShenandoahPartialGC::doit() {
+  GCIdMark gc_id_mark(_gc_id);
+  ShenandoahHeap *sh = ShenandoahHeap::heap();
+  sh->shenandoahPolicy()->record_gc_start();
+  ShenandoahHeap::heap()->do_partial_collection();
+  sh->shenandoahPolicy()->record_gc_end();
 }
 
 void VM_ShenandoahVerifyHeapAfterEvacuation::doit() {
diff --git a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp
--- a/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp
+++ b/src/share/vm/gc/shenandoah/vm_operations_shenandoah.hpp
@@ -75,6 +75,14 @@
   virtual void doit();
 };
 
+class VM_ShenandoahPartialGC: public VM_ShenandoahOperation {
+public:
+  VM_ShenandoahPartialGC() : VM_ShenandoahOperation() {};
+  VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahPartialGC; }
+  const char* name() const { return "Shenandoah Partial Collection"; }
+  virtual void doit();
+};
+
 class VM_ShenandoahVerifyHeapAfterEvacuation: public VM_ShenandoahOperation {
 public:
   VM_ShenandoahVerifyHeapAfterEvacuation() : VM_ShenandoahOperation() {};
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -100,6 +100,7 @@
   template(ShenandoahInitMark)                    \
   template(ShenandoahStartEvacuation)             \
   template(ShenandoahVerifyHeapAfterEvacuation)   \
+  template(ShenandoahPartialGC)                   \
   template(Exit)                                  \
   template(LinuxDllLoad)                          \
   template(RotateGCLog)                           \
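
The new heuristics is selected by name via ShenandoahGCHeuristics, and ShenandoahPartialInboundThreshold is an experimental flag. A test run would therefore look something like the following (the exact set of unlock flags depends on the build; flag names other than the two added here are assumptions):

    java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \
         -XX:ShenandoahGCHeuristics=partial \
         -XX:ShenandoahPartialInboundThreshold=10 ...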