# HG changeset patch
# User shade
# Date 1549148886 -3600
#      Sun Feb 03 00:08:06 2019 +0100
# Node ID adbb564dca5a08158b1bd79ca8ecc30e8c95e990
# Parent  9c84d2865c2d427fac4f1eef991ce9e1a5c9ac44
Epsilon + Mark-Compact

diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -22,12 +22,30 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
 #include "gc/epsilon/epsilonHeap.hpp"
 #include "gc/epsilon/epsilonMemoryPool.hpp"
 #include "gc/epsilon/epsilonThreadLocalData.hpp"
-#include "memory/allocation.hpp"
+#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/preservedMarks.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/markOop.inline.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/objectMonitor.inline.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vmOperations.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/stack.inline.hpp"
+#include "services/management.hpp"
 
 jint EpsilonHeap::initialize() {
   size_t align = _policy->heap_alignment();
@@ -236,7 +254,7 @@
   }
 
   // All prepared, let's do it!
-  HeapWord* res = allocate_work(size);
+  HeapWord* res = allocate_or_collect_work(size);
 
   if (res != NULL) {
     // Allocation successful
@@ -260,7 +278,7 @@
 
 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
   *gc_overhead_limit_was_exceeded = false;
-  return allocate_work(size);
+  return allocate_or_collect_work(size);
 }
 
 void EpsilonHeap::collect(GCCause::Cause cause) {
@@ -277,7 +295,15 @@
       print_metaspace_info();
       break;
     default:
-      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
+      if (EpsilonWhyNotGCAnyway) {
+        if (SafepointSynchronize::is_at_safepoint()) {
+          entry_collect(cause);
+        } else {
+          vmentry_collect(cause);
+        }
+      } else {
+        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
+      }
   }
   _monitoring_support->update_counters();
 }
@@ -341,3 +367,332 @@
     log_info(gc, metaspace)("Metaspace: no reliable data");
   }
 }
+
+// ------------------------------- EXPERIMENTAL MARK-COMPACT --------------------------------------------
+//
+// This implements a trivial Lisp2-style sliding collector:
+//     https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
+//
+// The goal for this implementation is to be as trivial as possible, ignoring
+// even the basic and obvious performance optimizations.
+//
+
+// VM operation that executes collection cycle under safepoint
+class VM_EpsilonCollect: public VM_Operation {
+private:
+  const GCCause::Cause _cause;
+public:
+  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(), _cause(cause) {};
+  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
+  const char* name() const { return "Epsilon Mark-Compact Collection"; }
+  virtual void doit() {
+    EpsilonHeap* heap = EpsilonHeap::heap();
+    heap->entry_collect(_cause);
+    if (EpsilonWhyNotGCAnywayAgain) {
+      heap->entry_collect(_cause);
+    }
+  }
+};
+
+// Utility to enter the safepoint for GC
+void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
+  VM_EpsilonCollect vmop(cause);
+  VMThread::execute(&vmop);
+}
+
+HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
+  HeapWord* res = allocate_work(size);
+  if (res == NULL && EpsilonWhyNotGCAnyway) {
+    vmentry_collect(GCCause::_allocation_failure);
+    res = allocate_work(size);
+  }
+  return res;
+}
+
+typedef Stack<oop, mtGC> EpsilonMarkStack;
+
+void EpsilonHeap::do_roots(OopClosure* cl, bool everything) {
+  // Need to adapt passed closure for some root types
+  CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
+  MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations);
+
+  // Need to tell runtime we are about to walk the roots with 1 thread
+  StrongRootsScope scope(1);
+
+  // Need locks to walk some roots
+  MutexLockerEx lock_cc(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx lock_cldg(ClassLoaderDataGraph_lock);
+
+  // Walk all these different parts of runtime roots
+  CodeCache::blobs_do(&blobs);
+  ClassLoaderDataGraph::cld_do(&clds);
+  Universe::oops_do(cl);
+  Management::oops_do(cl);
+  JvmtiExport::oops_do(cl);
+  JNIHandles::oops_do(cl);
+  WeakProcessor::oops_do(cl);
+  ObjectSynchronizer::oops_do(cl);
+  SystemDictionary::oops_do(cl);
+  Threads::possibly_parallel_oops_do(false, cl, &blobs);
+
+  // These are implicitly handled by other roots, and we only want to
+  // touch them during verification.
+  if (everything) {
+    StringTable::oops_do(cl);
+  }
+}
+
+// Walk the parsable heap and call the object closure on every object, or
+// only on every marked object when only_marked is set
+void EpsilonHeap::walk_heap(ObjectClosure* cl, bool only_marked) {
+  HeapWord* cur = _space->bottom();
+  HeapWord* limit = _space->top();
+  while (cur < limit) {
+    oop o = (oop)cur;
+    cur += o->size();
+    if (!only_marked || o->is_gc_marked()) {
+      cl->do_object(o);
+    }
+  }
+}
+
+class EpsilonScanOopClosure : public BasicOopIterateClosure {
+private:
+  EpsilonMarkStack* const _stack;
+  PreservedMarks* const _preserved_marks;
+
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      markOop mark = obj->mark_raw();
+      if (!mark->is_marked()) {
+        if (mark->must_be_preserved(obj)) {
+          _preserved_marks->push(obj, mark);
+        }
+        obj->set_mark_raw(markOopDesc::prototype()->set_marked());
+        _stack->push(obj);
+      }
+    }
+  }
+
+public:
+  EpsilonScanOopClosure(EpsilonMarkStack* stack, PreservedMarks* preserved_marks) :
+    _stack(stack), _preserved_marks(preserved_marks) {}
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
+private:
+  HeapWord* _compact_point;
+
+public:
+  EpsilonCalcNewLocationObjectClosure(HeapWord* bottom) : _compact_point(bottom) {}
+
+  void do_object(oop obj) {
+    obj->forward_to(oop(_compact_point));
+    _compact_point += obj->size();
+  }
+
+  HeapWord* compact_point() {
+    return _compact_point;
+  }
+};
+
+class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
+private:
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      oop fwd = obj->forwardee();
+      if (!oopDesc::equals_raw(obj, fwd)) {
+        RawAccess<>::oop_store(p, fwd);
+      }
+    }
+  }
+
+public:
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
+public:
+  void do_object(oop obj) {
+    EpsilonAdjustPointersOopClosure cl;
+    obj->oop_iterate(&cl);
+  }
+};
+
+class EpsilonMoveObjects : public ObjectClosure {
+public:
+  void do_object(oop obj) {
+    oop fwd = obj->forwardee();
+    if (!oopDesc::equals_raw(obj, fwd)) {
+      Copy::aligned_conjoint_words((HeapWord*)obj, (HeapWord*)fwd, obj->size());
+      fwd->init_mark_raw();
+    } else {
+      obj->init_mark_raw();
+    }
+  }
+};
+
+class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
+private:
+  EpsilonHeap* const _heap;
+
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      guarantee(_heap->is_in(obj),         "Is in heap:   " PTR_FORMAT, p2i(obj));
+      guarantee(oopDesc::is_oop(obj),      "Is an object: " PTR_FORMAT, p2i(obj));
+      guarantee(!obj->mark()->is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
+    }
+  }
+
+public:
+  EpsilonVerifyOopClosure() : _heap(EpsilonHeap::heap()) {}
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
+class EpsilonVerifyObjectClosure : public ObjectClosure {
+private:
+  EpsilonHeap* const _heap;
+public:
+  void do_object(oop obj) {
+    guarantee(_heap->is_in(obj),         "Is in heap:   " PTR_FORMAT, p2i(obj));
+    guarantee(oopDesc::is_oop(obj),      "Is an object: " PTR_FORMAT, p2i(obj));
+    guarantee(!obj->mark()->is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
"Mark is gone: " PTR_FORMAT, p2i(obj)); + EpsilonVerifyOopClosure cl; + obj->oop_iterate(&cl); + } + + EpsilonVerifyObjectClosure() : _heap(EpsilonHeap::heap()) {} +}; + +void EpsilonHeap::entry_collect(GCCause::Cause cause) { + GCIdMark mark; + GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true); + + { + GCTraceTime(Info, gc) time("Step 0: Prologue", NULL); + + // We need parsable heap to walk it. + ensure_parsability(true); + + // Tell various parts of runtime we are doing GC. + CodeCache::gc_prologue(); + BiasedLocking::preserve_marks(); + DerivedPointerTable::clear(); + DerivedPointerTable::set_active(false); + } + + // We are going to store marking information (whether the object was reachable) + // and forwarding information (where the new copy resides) in mark words. + // Some of those mark words need to be carefully preserved. This is an utility + // that maintains the list of those special mark words. + PreservedMarks preserved_marks; + + { + GCTraceTime(Info, gc) time("Step 1: Mark", NULL); + + // Marking stack and the closure that does most of the work. + // The closure would scan the outgoing references, mark them, + // and push newly-marked objects to stack for further processing. + EpsilonMarkStack stack; + EpsilonScanOopClosure cl(&stack, &preserved_marks); + + // Seed the marking with roots. + process_roots(&cl); + + // Scan the rest of the heap until we run out of objects. + // Termination is guaranteed, because all reachable threads would + // be marked eventually. + while (!stack.is_empty()) { + oop obj = stack.pop(); + obj->oop_iterate(&cl); + } + } + + // New top of the allocated space. + HeapWord* new_top; + + { + GCTraceTime(Info, gc) time("Step 2: Calculate new locations", NULL); + + // Walk all alive objects, compute their new addresses and store those addresses + // in mark words. Optionally preserve some marks. + EpsilonCalcNewLocationObjectClosure cl(_space->bottom()); + walk_heap(&cl, /* only_marked = */ true); + + // After addresses are calculated, we know the new top for the allocated space. + // We cannot set it just yet, because some asserts check that objects are "in heap" + // based on current "top". + new_top = cl.compact_point(); + } + + { + GCTraceTime(Info, gc) time("Step 3: Adjust pointers", NULL); + + // Walk all alive objects _and their reference fields_, and put "new addresses" + // there. We know the new addresses from the forwarding data in mark words. + // Take care of the heap objects first. + EpsilonAdjustPointersObjectClosure cl; + walk_heap(&cl, /* only_marked = */ true); + + // Now do the same, but for all VM roots, which reference the objects on + // their own: their references should also be updated. + EpsilonAdjustPointersOopClosure cli; + process_roots(&cli); + + // Finally, make sure preserved marks know the objects are about to move. + preserved_marks.adjust_during_full_gc(); + } + + { + GCTraceTime(Info, gc) time("Step 4: Move objects", NULL); + + // Move all alive objects to their new locations. All the references are already + // adjusted at previous step. + EpsilonMoveObjects cl; + walk_heap(&cl, /* only_marked = */ true); + + // Now we moved all objects to their relevant locations, we can retract the "top" + // of the allocation space to the end of the compacted prefix. + _space->set_top(new_top); + } + + { + GCTraceTime(Info, gc) time("Step 5: Epilogue", NULL); + + // Restore all special mark words. + preserved_marks.restore(); + + // Tell the rest of runtime we have finished the GC. 
+    DerivedPointerTable::update_pointers();
+    BiasedLocking::restore_marks();
+    CodeCache::gc_epilogue();
+    JvmtiExport::gc_epilogue();
+  }
+
+  if (EpsilonVerify) {
+    GCTraceTime(Info, gc) time("Step 6: Verify", NULL);
+
+    // Verify all roots are correct.
+    EpsilonVerifyOopClosure cl;
+    process_all_roots(&cl);
+
+    // Verify all objects in heap are correct. Since we have compacted
+    // everything to the beginning of the heap, it is parsable right now,
+    // and we can just walk all objects and verify them.
+    EpsilonVerifyObjectClosure ocl;
+    walk_heap(&ocl, /* only_marked = */ false);
+  }
+}

diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -90,7 +90,8 @@
 
   virtual bool is_scavengable(oop obj) {
     // No GC is going to happen, therefore no objects ever move.
-    return false;
+    // Or are they... (evil laugh).
+    return EpsilonWhyNotGCAnyway;
   }
 
   virtual bool is_maximal_no_gc() const {
@@ -100,6 +101,7 @@
 
   // Allocation
   HeapWord* allocate_work(size_t size);
+  HeapWord* allocate_or_collect_work(size_t size);
   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
   virtual HeapWord* allocate_new_tlab(size_t min_size,
                                       size_t requested_size,
@@ -122,7 +124,8 @@
   }
 
   // Object pinning support: every object is implicitly pinned
-  virtual bool supports_object_pinning() const { return true; }
+  // Or is it... (evil laugh)
+  virtual bool supports_object_pinning() const { return !EpsilonWhyNotGCAnyway; }
   virtual oop pin_object(JavaThread* thread, oop obj) { return obj; }
   virtual void unpin_object(JavaThread* thread, oop obj) { }
 
@@ -147,10 +150,19 @@
   virtual void print_on(outputStream* st) const;
   virtual void print_tracing_info() const;
 
+  void entry_collect(GCCause::Cause cause);
+
 private:
   void print_heap_info(size_t used) const;
   void print_metaspace_info() const;
 
+  void vmentry_collect(GCCause::Cause cause);
+
+  void do_roots(OopClosure* cl, bool everything);
+  void process_roots(OopClosure* cl)     { do_roots(cl, false); }
+  void process_all_roots(OopClosure* cl) { do_roots(cl, true);  }
+  void walk_heap(ObjectClosure* cl, bool only_marked);
+
 };
 
 #endif // SHARE_GC_EPSILON_EPSILONHEAP_HPP

diff --git a/src/hotspot/share/gc/epsilon/epsilon_globals.hpp b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp
--- a/src/hotspot/share/gc/epsilon/epsilon_globals.hpp
+++ b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp
@@ -91,6 +91,16 @@
   experimental(size_t, EpsilonMinHeapExpand, 128 * M,                        \
           "Min expansion step for heap. Larger value improves performance "  \
          "at the potential expense of memory waste.")                       \
-          range(1, max_intx)
+          range(1, max_intx)                                                 \
+                                                                             \
+  experimental(bool, EpsilonWhyNotGCAnyway, false,                           \
+          "Actually does sliding mark-compact GC.")                          \
+                                                                             \
+  experimental(bool, EpsilonWhyNotGCAnywayAgain, false,                      \
+          "Does the GC twice in a row to demo static costs.")                \
+                                                                             \
+  experimental(bool, EpsilonVerify, false,                                   \
+          "Does the additional GC verification step.")                       \
+
 
 #endif // SHARE_GC_EPSILON_EPSILON_GLOBALS_HPP

diff --git a/src/hotspot/share/runtime/vmOperations.hpp b/src/hotspot/share/runtime/vmOperations.hpp
--- a/src/hotspot/share/runtime/vmOperations.hpp
+++ b/src/hotspot/share/runtime/vmOperations.hpp
@@ -126,6 +126,7 @@
   template(ScavengeMonitors)                      \
   template(PrintMetadata)                         \
   template(GTestExecuteAtSafepoint)               \
+  template(EpsilonCollect)                        \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:

diff --git a/test/hotspot/jtreg/gc/epsilon/TestWhyNotGC.java b/test/hotspot/jtreg/gc/epsilon/TestWhyNotGC.java
new file mode 100644
--- /dev/null
+++ b/test/hotspot/jtreg/gc/epsilon/TestWhyNotGC.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package gc.epsilon;
+
+/**
+ * @test TestWhyNotGC
+ * @key gc
+ * @requires vm.gc.Epsilon & !vm.graal.enabled
+ * @summary Epsilon mark-compact GC prototype works
+ * @library /test/lib
+ * @run main gc.epsilon.TestWhyNotGC
+ */
+
+import java.util.concurrent.*;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestWhyNotGC {
+
+  public static void passWith(String... args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+    out.shouldNotContain("OutOfMemoryError");
+    out.shouldHaveExitValue(0);
+  }
+
+  public static void main(String[] args) throws Exception {
+    passWith("-Xmx512m",
+             "-XX:+UnlockExperimentalVMOptions",
+             "-XX:+UseEpsilonGC",
+             "-XX:+EpsilonWhyNotGCAnyway",
+             "-XX:+EpsilonVerify",
+             "-Dcount=1",
+             TestWhyNotGC.Workload.class.getName());
+
+    passWith("-Xmx512m",
+             "-XX:+UnlockExperimentalVMOptions",
+             "-XX:+UseEpsilonGC",
+             "-XX:+EpsilonWhyNotGCAnyway",
+             "-XX:+EpsilonVerify",
+             TestWhyNotGC.Workload.class.getName());
+  }
+
+  public static class Workload {
+    static int SIZE = Integer.getInteger("size", 10_000_000);
+    static int COUNT = Integer.getInteger("count", 100_000_000); // ~2.4 GB allocation
+
+    static Object[] sink;
+
+    public static void main(String... args) {
+      sink = new Object[SIZE];
+      for (int c = 0; c < COUNT; c++) {
+        sink[ThreadLocalRandom.current().nextInt(SIZE)] = new Object();
+      }
+    }
+  }
+
+}
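
With the patch applied, the new code paths can also be exercised by hand with any
allocation-heavy workload; "MyWorkload" below is a placeholder class, and -Xlog:gc
is the standard unified-logging switch that makes the log_info(gc) messages and the
per-step GCTraceTime timings visible:

  java -Xmx512m -Xlog:gc \
       -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC \
       -XX:+EpsilonWhyNotGCAnyway -XX:+EpsilonVerify \
       MyWorkload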