
src/hotspot/share/gc/epsilon/epsilonHeap.cpp

rev 53444 : Epsilon + Mark-Compact

@@ -20,16 +20,35 @@
  * questions.
  *
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderDataGraph.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
 #include "gc/epsilon/epsilonHeap.hpp"
 #include "gc/epsilon/epsilonMemoryPool.hpp"
 #include "gc/epsilon/epsilonThreadLocalData.hpp"
-#include "memory/allocation.hpp"
+#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/markBitMap.inline.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/preservedMarks.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/markOop.inline.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/objectMonitor.inline.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vmOperations.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/stack.inline.hpp"
+#include "services/management.hpp"
 
 jint EpsilonHeap::initialize() {
   size_t align = _policy->heap_alignment();
   size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
   size_t max_byte_size  = align_up(_policy->max_heap_byte_size(), align);

@@ -58,10 +77,24 @@
   _last_heap_print = 0;
 
   // Install barrier set
   BarrierSet::set_barrier_set(new EpsilonBarrierSet());
 
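+  // Compute the size for the marking bitmap that covers the whole reserved
+  // heap, rounded up to page size so it can be committed and tracked separately.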
+  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
+  size_t bitmap_size = MarkBitMap::compute_size(heap_rs.size());
+  bitmap_size = align_up(bitmap_size, bitmap_page_size);
+
+  // Initialize marking bitmap
+  if (EpsilonWhyNotGCAnyway) {
+    ReservedSpace bitmap(bitmap_size, bitmap_page_size);
+    os::commit_memory_or_exit(bitmap.base(), bitmap.size(), false, "couldn't allocate marking bitmap");
+    MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
+    MemRegion bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
+    MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
+    _bitmap.initialize(heap_region, bitmap_region);
+  }
+
   // All done, print out the configuration
   if (init_byte_size != max_byte_size) {
     log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                  init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
   } else {

@@ -234,11 +267,11 @@
                   ergo_tlab * HeapWordSize / K,
                   size * HeapWordSize / K);
   }
 
   // All prepared, let's do it!
-  HeapWord* res = allocate_work(size);
+  HeapWord* res = allocate_or_collect_work(size);
 
   if (res != NULL) {
     // Allocation successful
     *actual_size = size;
     if (EpsilonElasticTLABDecay) {

@@ -258,11 +291,11 @@
   return res;
 }
 
 HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
   *gc_overhead_limit_was_exceeded = false;
-  return allocate_work(size);
+  return allocate_or_collect_work(size);
 }
 
 void EpsilonHeap::collect(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_metadata_GC_threshold:

@@ -275,12 +308,20 @@
       log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
       MetaspaceGC::compute_new_size();
       print_metaspace_info();
       break;
     default:
+      if (EpsilonWhyNotGCAnyway) {
+        if (SafepointSynchronize::is_at_safepoint()) {
+          entry_collect(cause);
+        } else {
+          vmentry_collect(cause);
+        }
+      } else {
-      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
+        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
+      }
  }
   _monitoring_support->update_counters();
 }
 
 void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
   collect(gc_cause());

@@ -339,5 +380,291 @@
             used * 100.0 / reserved);
   } else {
     log_info(gc, metaspace)("Metaspace: no reliable data");
   }
 }
+
+// ------------------------------- EXPERIMENTAL MARK-COMPACT --------------------------------------------
+//
+// This implements a trivial Lisp2-style sliding collector:
+//     https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
+//
+// The goal for this implementation is to be as simple as possible, deliberately
+// ignoring even basic and obvious performance optimizations.
+//
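+// The collection runs at a safepoint and proceeds in the steps laid out in
+// entry_collect() below:
+//   1. Clear the marking bitmap
+//   2. Mark all reachable objects, starting from roots
+//   3. Calculate the new locations for live objects, storing forwarding
+//      addresses in their mark words
+//   4. Adjust all references in heap objects and VM roots to new locations
+//   5. Slide the live objects to their new locations, towards the heap bottom
+//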
+
+// VM operation that executes the collection cycle at a safepoint
+class VM_EpsilonCollect: public VM_Operation {
+private:
+  GCCause::Cause _cause;
+public:
+  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(), _cause(cause) {}
+  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
+  const char* name()             const { return "Epsilon Collection"; }
+  virtual void doit() {
+    EpsilonHeap* heap = EpsilonHeap::heap();
+    heap->entry_collect(_cause);
+    if (EpsilonWhyNotGCAnywayAgain) {
+      heap->entry_collect(_cause);
+    }
+  }
+};
+
+// Utility that enters a safepoint and runs the collection from the VM thread
+void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
+  VM_EpsilonCollect vmop(cause);
+  VMThread::execute(&vmop);
+}
+
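+// Allocation slow path: try the regular allocation first; if it fails and the
+// experimental GC is enabled, run one collection cycle and retry once.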
+HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
+  HeapWord* res = allocate_work(size);
+  if (res == NULL && EpsilonWhyNotGCAnyway) {
+    vmentry_collect(GCCause::_allocation_failure);
+    res = allocate_work(size);
+  }
+  return res;
+}
+
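+// Mark stack holding the "grey" objects: marked, but not yet scanned.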
+typedef Stack<oop, mtGC> EpsilonMarkStack;
+
+void EpsilonHeap::process_all_roots(OopClosure* oops) {
+  // Need to adapt passed closure for some root types
+  CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
+  MarkingCodeBlobClosure blobs(oops, CodeBlobToOopClosure::FixRelocations);
+
+  // Need to tell runtime we are about to walk the roots with 1 thread
+  StrongRootsScope scope(1);
+
+  // Need locks to walk some roots
+  MutexLockerEx lock_cc(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  MutexLockerEx lock_cldg(ClassLoaderDataGraph_lock);
+
+  // Walk all these different parts of runtime roots
+  CodeCache::blobs_do(&blobs);
+  ClassLoaderDataGraph::cld_do(&clds);
+  Universe::oops_do(oops);
+  Management::oops_do(oops);
+  JvmtiExport::oops_do(oops);
+  JNIHandles::oops_do(oops);
+  WeakProcessor::oops_do(oops);
+  ObjectSynchronizer::oops_do(oops);
+  SystemDictionary::oops_do(oops);
+  StringTable::oops_do(oops);
+  Threads::possibly_parallel_oops_do(false, oops, &blobs);
+}
+
+// Walk the marking bitmap and call object closure on every marked object.
+void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
+  HeapWord* limit = _space->top();
+  HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
+  while (addr < limit) {
+    oop obj = oop(addr);
+    assert(_bitmap.is_marked(obj), "sanity");
+    cl->do_object(obj);
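+    // Marks are set on the first word of an object. Step over that word and
+    // search the bitmap again to find the start of the next live object.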
+    addr += 1;
+    if (addr < limit) {
+      addr = _bitmap.get_next_marked_addr(addr, limit);
+    }
+  }
+}
+
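+// Marking closure: for every reference, mark the target object in the bitmap,
+// and push it on the mark stack if this was the first closure to mark it.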
+class EpsilonScanOopClosure : public BasicOopIterateClosure {
+private:
+  EpsilonMarkStack* _stack;
+  MarkBitMap* _bitmap;
+
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      if (_bitmap->par_mark(obj)) {
+        _stack->push(obj);
+      }
+    }
+  }
+
+public:
+  EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
+                        _stack(stack), _bitmap(bitmap) {}
+
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
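+// Sliding closure: assign each live object its new address at the current
+// compact point, record that address as a forwarding pointer in its mark word,
+// and preserve mark words that carry state (e.g. locking state or hashcode).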
+class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
+private:
+  PreservedMarks* _preserved;
+  HeapWord* _compact_point;
+
+public:
+  EpsilonCalcNewLocationObjectClosure(PreservedMarks* preserved, HeapWord *bottom) :
+                                    _preserved(preserved), _compact_point(bottom) {}
+
+  void do_object(oop obj) {
+    if ((HeapWord*)obj != _compact_point) {
+      markOop mark = obj->mark_raw();
+      if (mark->must_be_preserved(obj)) {
+        _preserved->push(obj, mark);
+      }
+      obj->forward_to(oop(_compact_point));
+    }
+    _compact_point += obj->size();
+  }
+
+  HeapWord* compact_point() {
+    return _compact_point;
+  }
+};
+
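+// Adjusting closure: rewrite every reference that points to a forwarded object
+// with the new address stored in that object's mark word.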
+class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
+private:
+  template <class T>
+  void do_oop_work(T* p) {
+    T o = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(o)) {
+      oop obj = CompressedOops::decode_not_null(o);
+      if (obj->is_forwarded()) {
+        oop fwd = obj->forwardee();
+        RawAccess<>::oop_store(p, fwd);
+      }
+    }
+  }
+
+public:
+  EpsilonAdjustPointersOopClosure() {}
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+
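+// Object-level wrapper that applies the adjusting closure to every reference
+// field of a live object.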
+class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
+public:
+  void do_object(oop obj) {
+    EpsilonAdjustPointersOopClosure cl;
+    obj->oop_iterate(&cl);
+  }
+};
+
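+// Moving closure: copy each forwarded object to its new location, and
+// reinitialize its mark word (the old one held the forwarding pointer).
+// A conjoint (memmove-style) copy is used because the old and new copies may
+// overlap; walking in address order keeps the slide to the bottom safe.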
+class EpsilonMoveObjects : public ObjectClosure {
+public:
+  void do_object(oop obj) {
+    if (obj->is_forwarded()) {
+      oop fwd = obj->forwardee();
+      Copy::aligned_conjoint_words((HeapWord*) obj, (HeapWord*) fwd, obj->size());
+      oop(fwd)->init_mark_raw();
+    }
+  }
+};
+
+void EpsilonHeap::entry_collect(GCCause::Cause cause) {
+  GCIdMark mark;
+  GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);
+
+  {
+    GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);
+
+    // Strictly speaking, we do not need a parsable heap for this algorithm,
+    // but we want threads to give up their TLABs.
+    ensure_parsability(true);
+
+    // Tell various parts of runtime we are doing GC.
+    CodeCache::gc_prologue();
+    BiasedLocking::preserve_marks();
+    DerivedPointerTable::clear();
+    DerivedPointerTable::set_active(false);
+  }
+
+  {
+    GCTraceTime(Info, gc) time("Step 1: Clear bitmap", NULL);
+
+    // Clear bitmap in preparation for marking. Do this in a separate step
+    // to show the heap-size-dependent cost of bitmap manipulations.
+    _bitmap.clear_range_large(_space->used_region());
+  }
+
+  {
+    GCTraceTime(Info, gc) time("Step 2: Mark", NULL);
+
+    // Marking stack and the closure that does most of the work. The closure
+    // scans outgoing references, marks the target objects in the bitmap, and
+    // pushes newly-marked objects onto the stack for further processing.
+    EpsilonMarkStack stack;
+    EpsilonScanOopClosure cl(&stack, &_bitmap);
+
+    // Seed the marking with roots.
+    process_all_roots(&cl);
+
+    // Scan the rest of the heap until we run out of objects. Termination is
+    // guaranteed, because all reachable objects will eventually be marked,
+    // and each object enters the stack at most once.
+    while (!stack.is_empty()) {
+      oop obj = stack.pop();
+      obj->oop_iterate(&cl);
+    }
+  }
+
+  // We are going to store forwarding information (where the new copy resides)
+  // in mark words. Some of those mark words need to be carefully preserved.
+  // This is a utility that maintains the list of those special mark words.
+  PreservedMarks preserved_marks;
+
+  // New top of the allocated space.
+  HeapWord* new_top;
+
+  {
+    GCTraceTime(Info, gc) time("Step 3: Calculate new locations", NULL);
+
+    // Walk all alive objects, compute their new addresses and store those addresses
+    // in mark words. Optionally preserve some marks.
+    EpsilonCalcNewLocationObjectClosure cl(&preserved_marks, _space->bottom());
+    walk_bitmap(&cl);
+
+    // After addresses are calculated, we know the new top for the allocated space.
+    // We cannot set it just yet, because some asserts check that objects are "in heap"
+    // based on current "top".
+    new_top = cl.compact_point();
+  }
+
+  {
+    GCTraceTime(Info, gc) time("Step 4: Adjust pointers", NULL);
+
+    // Walk all alive objects _and their reference fields_, and put "new addresses"
+    // there. We know the new addresses from the forwarding data in mark words.
+    // Take care of the heap objects first.
+    EpsilonAdjustPointersObjectClosure cl;
+    walk_bitmap(&cl);
+
+    // Now do the same for all VM roots: they reference heap objects directly,
+    // so those references need to be updated as well.
+    EpsilonAdjustPointersOopClosure cli;
+    process_all_roots(&cli);
+
+    // Finally, adjust the preserved mark entries themselves: the objects they
+    // reference are about to move.
+    preserved_marks.adjust_during_full_gc();
+  }
+
+  {
+    GCTraceTime(Info, gc) time("Step 5: Move objects", NULL);
+
+    // Move all alive objects to their new locations. All the references were
+    // already adjusted in the previous step.
+    EpsilonMoveObjects cl;
+    walk_bitmap(&cl);
+
+    // Now that all objects are at their new locations, we can retract the "top"
+    // of the allocation space to the end of the compacted prefix.
+    _space->set_top(new_top);
+  }
+
+  {
+    GCTraceTime(Info, gc) time("Step 6: Epilogue", NULL);
+
+    // Restore all special mark words.
+    preserved_marks.restore();
+
+    // Tell the rest of runtime we have finished the GC.
+    DerivedPointerTable::update_pointers();
+    BiasedLocking::restore_marks();
+    CodeCache::gc_epilogue();
+    JvmtiExport::gc_epilogue();
+  }
+}