< prev index next >
src/hotspot/share/gc/epsilon/epsilonHeap.cpp
Print this page
rev 53608 : Epsilon + Mark-Compact
*** 20,35 ****
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
! #include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
jint EpsilonHeap::initialize() {
size_t align = _policy->heap_alignment();
size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
size_t max_byte_size = align_up(_policy->max_heap_byte_size(), align);
--- 20,53 ----
* questions.
*
*/
#include "precompiled.hpp"
+ #include "classfile/classLoaderDataGraph.hpp"
+ #include "classfile/stringTable.hpp"
+ #include "classfile/systemDictionary.hpp"
+ #include "code/codeCache.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
! #include "gc/shared/barrierSet.inline.hpp"
! #include "gc/shared/gcTraceTime.inline.hpp"
! #include "gc/shared/strongRootsScope.hpp"
! #include "gc/shared/preservedMarks.inline.hpp"
! #include "gc/shared/weakProcessor.hpp"
#include "memory/allocation.inline.hpp"
+ #include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
+ #include "oops/compressedOops.inline.hpp"
+ #include "oops/markOop.inline.hpp"
+ #include "runtime/biasedLocking.hpp"
+ #include "runtime/objectMonitor.inline.hpp"
+ #include "runtime/thread.hpp"
+ #include "runtime/vmOperations.hpp"
+ #include "runtime/vmThread.hpp"
+ #include "utilities/stack.inline.hpp"
+ #include "services/management.hpp"
jint EpsilonHeap::initialize() {
size_t align = _policy->heap_alignment();
size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
size_t max_byte_size = align_up(_policy->max_heap_byte_size(), align);
*** 234,244 ****
ergo_tlab * HeapWordSize / K,
size * HeapWordSize / K);
}
// All prepared, let's do it!
! HeapWord* res = allocate_work(size);
if (res != NULL) {
// Allocation successful
*actual_size = size;
if (EpsilonElasticTLABDecay) {
--- 252,262 ----
ergo_tlab * HeapWordSize / K,
size * HeapWordSize / K);
}
// All prepared, let's do it!
! HeapWord* res = allocate_or_collect_work(size);
if (res != NULL) {
// Allocation successful
*actual_size = size;
if (EpsilonElasticTLABDecay) {
*** 258,268 ****
return res;
}
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
*gc_overhead_limit_was_exceeded = false;
! return allocate_work(size);
}
void EpsilonHeap::collect(GCCause::Cause cause) {
switch (cause) {
case GCCause::_metadata_GC_threshold:
--- 276,286 ----
return res;
}
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
*gc_overhead_limit_was_exceeded = false;
! return allocate_or_collect_work(size);
}
void EpsilonHeap::collect(GCCause::Cause cause) {
switch (cause) {
case GCCause::_metadata_GC_threshold:
*** 275,286 ****
--- 293,312 ----
log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
MetaspaceGC::compute_new_size();
print_metaspace_info();
break;
default:
+ if (EpsilonWhyNotGCAnyway) {
+ if (SafepointSynchronize::is_at_safepoint()) {
+ entry_collect(cause);
+ } else {
+ vmentry_collect(cause);
+ }
+ } else {
log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
}
+ }
_monitoring_support->update_counters();
}
void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
collect(gc_cause());
*** 339,343 ****
--- 365,698 ----
used * 100.0 / reserved);
} else {
log_info(gc, metaspace)("Metaspace: no reliable data");
}
}
+
+ // ------------------------------- EXPERIMENTAL MARK-COMPACT --------------------------------------------
+ //
+ // This implements a trivial Lisp2-style sliding collector:
+ // https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
+ //
+ // The goal for this implementation is to be as trivial as possible, ignoring even the
+ // basic and obvious performance optimizations.
+ //
+
+ // VM operation that executes collection cycle under safepoint
+ class VM_EpsilonCollect: public VM_Operation {
+ private:
+ // GC cause, forwarded to the collection cycle for logging/accounting.
+ const GCCause::Cause _cause;
+ public:
+ VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(), _cause(cause) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
+ const char* name() const { return "Epsilon Mark-Compact Collection"; }
+ // Runs on the VM thread while all Java threads are stopped at the safepoint.
+ virtual void doit() {
+ EpsilonHeap* heap = EpsilonHeap::heap();
+ heap->entry_collect(_cause);
+ // NOTE(review): this flag triggers a second back-to-back cycle;
+ // presumably a stress/diagnostic mode — confirm against globals definition.
+ if (EpsilonWhyNotGCAnywayAgain) {
+ heap->entry_collect(_cause);
+ }
+ }
+ };
+
+ // Utility to enter the safepoint for GC
+ // Wraps the cycle in a VM operation and hands it to the VM thread;
+ // VMThread::execute() returns after the operation has been performed.
+ void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
+ VM_EpsilonCollect vmop(cause);
+ VMThread::execute(&vmop);
+ }
+
+ // Allocation slow path: try to allocate, and if that fails and the
+ // experimental GC is enabled, run one collection cycle under a safepoint
+ // and retry the allocation exactly once. Returns NULL on final failure.
+ HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
+ HeapWord* res = allocate_work(size);
+ if (res == NULL && EpsilonWhyNotGCAnyway) {
+ vmentry_collect(GCCause::_allocation_failure);
+ res = allocate_work(size);
+ }
+ return res;
+ }
+
+ typedef Stack<oop, mtGC> EpsilonMarkStack;
+
+ // Visit all strong GC roots with the given closure, single-threaded.
+ // When 'everything' is set, also visit roots that are normally handled
+ // implicitly (used only by verification).
+ void EpsilonHeap::do_roots(OopClosure* cl, bool everything) {
+ // Need to adapt passed closure for some root types
+ CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
+ MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations);
+
+ // Need to tell runtime we are about to walk the roots with 1 thread
+ StrongRootsScope scope(1);
+
+ // Need locks to walk some roots
+ // NOTE(review): lock_cc passes _no_safepoint_check_flag but lock_cldg does
+ // not — confirm CLDG lock acquisition is legal at a safepoint without it.
+ MutexLockerEx lock_cc(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+ MutexLockerEx lock_cldg(ClassLoaderDataGraph_lock);
+
+ // Walk all these different parts of runtime roots
+ CodeCache::blobs_do(&blobs);
+ ClassLoaderDataGraph::cld_do(&clds);
+ Universe::oops_do(cl);
+ Management::oops_do(cl);
+ JvmtiExport::oops_do(cl);
+ JNIHandles::oops_do(cl);
+ WeakProcessor::oops_do(cl);
+ ObjectSynchronizer::oops_do(cl);
+ SystemDictionary::oops_do(cl);
+ Threads::possibly_parallel_oops_do(false, cl, &blobs);
+
+ // This is implicitly handled by other roots, and we only want to
+ // touch these during verification.
+ if (everything) {
+ StringTable::oops_do(cl);
+ }
+ }
+
+ // Walk the parsable heap and call the object closure on every object;
+ // when only_marked is true, visit only the objects marked by the GC.
+ void EpsilonHeap::walk_heap(ObjectClosure* cl, bool only_marked) {
+ HeapWord* cur = _space->bottom();
+ HeapWord* limit = _space->top();
+ // Pre-checked loop: a do-while would dereference the word at bottom()
+ // even when the heap is empty (bottom == top).
+ while (cur < limit) {
+ oop o = (oop)cur;
+ // Read the size before invoking the closure: the move closure (Step 4)
+ // may relocate the object and clobber its old location.
+ cur += o->size();
+ // Visit all objects unless the caller asked for marked objects only.
+ // (The previous "only_marked && is_gc_marked()" predicate skipped
+ // every object when only_marked was false, which silently turned the
+ // verification walk into a no-op.)
+ if (!only_marked || o->is_gc_marked()) {
+ cl->do_object(o);
+ }
+ }
+ }
+
+ // Marking closure: for each reference slot, load the target object and,
+ // if it is not yet marked, preserve its original mark word when required,
+ // stamp the "marked" prototype mark, and push it on the mark stack for
+ // further scanning.
+ class EpsilonScanOopClosure : public BasicOopIterateClosure {
+ private:
+ EpsilonMarkStack* const _stack;
+ PreservedMarks* const _preserved_marks;
+
+ template <class T>
+ void do_oop_work(T* p) {
+ // Load the reference; T is either oop or narrowOop depending on slot.
+ T o = RawAccess<>::oop_load(p);
+ if (!CompressedOops::is_null(o)) {
+ oop obj = CompressedOops::decode_not_null(o);
+ markOop mark = obj->mark_raw();
+ if (!mark->is_marked()) {
+ // Mark word is about to be overwritten; save it if it carries
+ // information (e.g. locking/hash state) that must survive the GC.
+ if (mark->must_be_preserved(obj)) {
+ _preserved_marks->push(obj, mark);
+ }
+ obj->set_mark_raw(markOopDesc::prototype()->set_marked());
+ _stack->push(obj);
+ }
+ }
+ }
+
+ public:
+ EpsilonScanOopClosure(EpsilonMarkStack* stack, PreservedMarks* preserved_marks) :
+ _stack(stack), _preserved_marks(preserved_marks) {}
+ virtual void do_oop(oop* p) { do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+ };
+
+ // Computes the sliding-compaction target address for each live object and
+ // records it in the object's mark word via forward_to(). Objects are visited
+ // in address order, so compaction targets form a dense prefix from 'bottom'.
+ class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
+ private:
+ // Next free address in the compacted prefix.
+ HeapWord* _compact_point;
+
+ public:
+ EpsilonCalcNewLocationObjectClosure(HeapWord* bottom) : _compact_point(bottom) {}
+
+ void do_object(oop obj) {
+ obj->forward_to(oop(_compact_point));
+ _compact_point += obj->size();
+ }
+
+ // After the walk: the end of the compacted prefix, i.e. the new top.
+ HeapWord* compact_point() {
+ return _compact_point;
+ }
+ };
+
+ // Rewrites a reference slot to point at the referent's new (forwarded)
+ // location, as recorded in the mark word during Step 2. Slots whose
+ // referents do not move are left untouched.
+ class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
+ private:
+ template <class T>
+ void do_oop_work(T* p) {
+ T o = RawAccess<>::oop_load(p);
+ if (!CompressedOops::is_null(o)) {
+ oop obj = CompressedOops::decode_not_null(o);
+ oop fwd = obj->forwardee();
+ // Store only when the object actually moves, avoiding useless writes.
+ if (!oopDesc::equals_raw(obj, fwd)) {
+ RawAccess<>::oop_store(p, fwd);
+ }
+ }
+ }
+
+ public:
+ virtual void do_oop(oop* p) { do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+ };
+
+ // Per-object wrapper: iterates all reference fields of one object and
+ // adjusts each of them with EpsilonAdjustPointersOopClosure.
+ class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
+ public:
+ void do_object(oop obj) {
+ EpsilonAdjustPointersOopClosure cl;
+ obj->oop_iterate(&cl);
+ }
+ };
+
+ // Slides each live object to its forwarded location and reinitializes the
+ // mark word (which held forwarding data up to this point). Copy direction is
+ // always downwards, so a conjoint (overlap-safe) word copy is sufficient.
+ class EpsilonMoveObjects : public ObjectClosure {
+ public:
+ void do_object(oop obj) {
+ oop fwd = obj->forwardee();
+ if (!oopDesc::equals_raw(obj, fwd)) {
+ Copy::aligned_conjoint_words((HeapWord *) obj, (HeapWord *) fwd, obj->size());
+ fwd->init_mark_raw();
+ } else {
+ // Object stays in place; still need to clear the marking bits.
+ obj->init_mark_raw();
+ }
+ }
+ };
+
+ // Post-GC verification for reference slots: every non-null referent must be
+ // inside the heap, look like a valid object, and have no leftover mark bits.
+ class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
+ private:
+ EpsilonHeap* const _heap;
+
+ template <class T>
+ void do_oop_work(T* p) {
+ T o = RawAccess<>::oop_load(p);
+ if (!CompressedOops::is_null(o)) {
+ oop obj = CompressedOops::decode_not_null(o);
+ guarantee(_heap->is_in(obj), "Is in heap: " PTR_FORMAT, p2i(obj));
+ guarantee(oopDesc::is_oop(obj), "Is an object: " PTR_FORMAT, p2i(obj));
+ guarantee(!obj->mark()->is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
+ }
+ }
+
+ public:
+ EpsilonVerifyOopClosure() : _heap(EpsilonHeap::heap()) {}
+ virtual void do_oop(oop* p) { do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+ };
+
+ // Post-GC verification for whole objects: checks the object itself, then
+ // recurses one level into its reference fields via EpsilonVerifyOopClosure.
+ class EpsilonVerifyObjectClosure : public ObjectClosure {
+ private:
+ EpsilonHeap* const _heap;
+ public:
+ void do_object(oop obj) {
+ guarantee(_heap->is_in(obj), "Is in heap: " PTR_FORMAT, p2i(obj));
+ guarantee(oopDesc::is_oop(obj), "Is an object: " PTR_FORMAT, p2i(obj));
+ guarantee(!obj->mark()->is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
+ EpsilonVerifyOopClosure cl;
+ obj->oop_iterate(&cl);
+ }
+
+ EpsilonVerifyObjectClosure() : _heap(EpsilonHeap::heap()) {}
+ };
+
+ // The collection cycle proper: a classic LISP2 sliding mark-compact in six
+ // sequential steps. Must run at a safepoint (see VM_EpsilonCollect).
+ void EpsilonHeap::entry_collect(GCCause::Cause cause) {
+ GCIdMark mark;
+ GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);
+
+ {
+ GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);
+
+ // We need parsable heap to walk it.
+ ensure_parsability(true);
+
+ // Tell various parts of runtime we are doing GC.
+ // NOTE(review): DerivedPointerTable calls are usually guarded by
+ // COMPILER2/JVMCI conditionals elsewhere — confirm build configs.
+ CodeCache::gc_prologue();
+ BiasedLocking::preserve_marks();
+ DerivedPointerTable::clear();
+ DerivedPointerTable::set_active(false);
+ }
+
+ // We are going to store marking information (whether the object was reachable)
+ // and forwarding information (where the new copy resides) in mark words.
+ // Some of those mark words need to be carefully preserved. This is a utility
+ // that maintains the list of those special mark words.
+ PreservedMarks preserved_marks;
+
+ {
+ GCTraceTime(Info, gc) time("Step 1: Mark", NULL);
+
+ // Marking stack and the closure that does most of the work.
+ // The closure would scan the outgoing references, mark them,
+ // and push newly-marked objects to stack for further processing.
+ EpsilonMarkStack stack;
+ EpsilonScanOopClosure cl(&stack, &preserved_marks);
+
+ // Seed the marking with roots.
+ // NOTE(review): process_roots/process_all_roots are presumably inline
+ // wrappers around do_roots(cl, everything) declared in the header — confirm.
+ process_roots(&cl);
+
+ // Scan the rest of the heap until we run out of objects.
+ // Termination is guaranteed, because all reachable objects would
+ // be marked eventually.
+ while (!stack.is_empty()) {
+ oop obj = stack.pop();
+ obj->oop_iterate(&cl);
+ }
+ }
+
+ // New top of the allocated space.
+ HeapWord* new_top;
+
+ {
+ GCTraceTime(Info, gc) time("Step 2: Calculate new locations", NULL);
+
+ // Walk all alive objects, compute their new addresses and store those addresses
+ // in mark words. Optionally preserve some marks.
+ EpsilonCalcNewLocationObjectClosure cl(_space->bottom());
+ walk_heap(&cl, /* only_marked = */ true);
+
+ // After addresses are calculated, we know the new top for the allocated space.
+ // We cannot set it just yet, because some asserts check that objects are "in heap"
+ // based on current "top".
+ new_top = cl.compact_point();
+ }
+
+ {
+ GCTraceTime(Info, gc) time("Step 3: Adjust pointers", NULL);
+
+ // Walk all alive objects _and their reference fields_, and put "new addresses"
+ // there. We know the new addresses from the forwarding data in mark words.
+ // Take care of the heap objects first.
+ EpsilonAdjustPointersObjectClosure cl;
+ walk_heap(&cl, /* only_marked = */ true);
+
+ // Now do the same, but for all VM roots, which reference the objects on
+ // their own: their references should also be updated.
+ EpsilonAdjustPointersOopClosure cli;
+ process_roots(&cli);
+
+ // Finally, make sure preserved marks know the objects are about to move.
+ preserved_marks.adjust_during_full_gc();
+ }
+
+ {
+ GCTraceTime(Info, gc) time("Step 4: Move objects", NULL);
+
+ // Move all alive objects to their new locations. All the references are already
+ // adjusted at previous step.
+ EpsilonMoveObjects cl;
+ walk_heap(&cl, /* only_marked = */ true);
+
+ // Now we moved all objects to their relevant locations, we can retract the "top"
+ // of the allocation space to the end of the compacted prefix.
+ _space->set_top(new_top);
+ }
+
+ {
+ GCTraceTime(Info, gc) time("Step 5: Epilogue", NULL);
+
+ // Restore all special mark words.
+ preserved_marks.restore();
+
+ // Tell the rest of runtime we have finished the GC.
+ DerivedPointerTable::update_pointers();
+ BiasedLocking::restore_marks();
+ CodeCache::gc_epilogue();
+ JvmtiExport::gc_epilogue();
+ }
+
+ if (EpsilonVerify) {
+ GCTraceTime(Info, gc) time("Step 6: Verify", NULL);
+
+ // Verify all roots are correct.
+ EpsilonVerifyOopClosure cl;
+ process_all_roots(&cl);
+
+ // Verify all objects in heap are correct. Since we have compacted everything
+ // to be beginning, the heap is parsable right now, and we can just walk all
+ // objects and verify them.
+ EpsilonVerifyObjectClosure ocl;
+ walk_heap(&ocl, /* only_marked = */ false);
+ }
+
+ }
< prev index next >