/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "utilities/stack.inline.hpp"

jint EpsilonHeap::initialize() {
  size_t align = _policy->heap_alignment();
  size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
  size_t max_byte_size  = align_up(_policy->max_heap_byte_size(), align);

  // Initialize backing storage
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  bitmap_size = align_up(bitmap_size, bitmap_page_size);

  // Initialize marking bitmap
  if (EpsilonWhyNotGCAnyway) {
    ReservedSpace bitmap(bitmap_size, bitmap_page_size);
    os::commit_memory_or_exit(bitmap.base(), bitmap.size(), false, "couldn't allocate marking bitmap");
    MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
    MemRegion bitmap_region = MemRegion((HeapWord *) bitmap.base(), bitmap.size() / HeapWordSize);
    MemRegion heap_region = MemRegion((HeapWord *) heap_rs.base(), heap_rs.size() / HeapWordSize);
    _bitmap.initialize(heap_region, bitmap_region);
  }
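
  // Rough sizing note (estimate, not measured here): MarkBitMap::compute_size()
  // asks for about one bit per heap word with the default 8-byte object alignment,
  // i.e. roughly 1/64th of the reserved heap size, so committing the bitmap
  // eagerly is cheap compared to the heap itself.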

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
    if (EpsilonElasticTLAB) {
      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
    }
    if (EpsilonElasticTLABDecay) {
      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
    }
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual TLAB allocation size.
  return _max_tlab_size;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLockerEx ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, and this allocation is still possible,
      // take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

  // Allocation successful, update counters
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }
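
  // Both blocks above throttle the (comparatively expensive) reporting paths:
  // only the thread that successfully CAS-es _last_* forward does the work, and
  // only once per _step_* bytes of allocation. As a rough example with hypothetical
  // settings, a 1G heap with EpsilonUpdateCountersStep=1M refreshes the counters
  // on the order of a thousand times over the run, regardless of how many threads
  // allocate concurrently.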

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread has not allocated recently, retract the ergonomic size.
      // This conserves memory when a thread had an initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }
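
  // Worked example with hypothetical numbers: EpsilonTLABElasticity=2.0 and an
  // ergonomic size of 64K words turn a non-fitting request into a 128K-word TLAB;
  // repeated misses keep growing it geometrically until the clamps below cap it
  // at _max_tlab_size. A freshly decayed thread (ergo size 0) starts over from
  // the minimum size.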

  // Always honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: "  SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: "  SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                          "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_or_collect_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_or_collect_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      if (EpsilonWhyNotGCAnyway) {
        if (SafepointSynchronize::is_at_safepoint()) {
          entry_collect(cause);
        } else {
          vmentry_collect(cause);
        }
      } else {
        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
      }
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

  MetaspaceUtils::print_on(st);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved  = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used      = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
            committed * 100.0 / reserved,
            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}

// ------------------------------- EXPERIMENTAL MARK-COMPACT --------------------------------------------
//
// This implements a trivial Lisp2-style sliding collector:
//     https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
//
// The goal for this implementation is to be as trivial as possible, ignoring even the
// basic and obvious performance optimizations.
//
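// The cycle runs in the textbook phases, see entry_collect() below:
//   0. Prologue: retire TLABs, tell the runtime a GC is starting
//   1. Clear the mark bitmap
//   2. Mark live objects, starting from the roots
//   3. Calculate new (compacted) locations, recording them in mark words
//   4. Adjust all references, in the heap and in the roots
//   5. Move the objects
//   6. Epilogue: restore marks, tell the runtime the GC is done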

// VM operation that executes collection cycle under safepoint
class VM_EpsilonCollect: public VM_Operation {
private:
  GCCause::Cause _cause;
public:
  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(), _cause(cause) {}
  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
  const char* name()             const { return "Epsilon Collection"; }
  virtual void doit() {
    EpsilonHeap* heap = EpsilonHeap::heap();
    heap->entry_collect(_cause);
    if (EpsilonWhyNotGCAnywayAgain) {
      heap->entry_collect(_cause);
    }
  }
};

// Utility to enter the safepoint for GC
void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
  VM_EpsilonCollect vmop(cause);
  VMThread::execute(&vmop);
}

HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
  HeapWord* res = allocate_work(size);
  if (res == NULL && EpsilonWhyNotGCAnyway) {
    vmentry_collect(GCCause::_allocation_failure);
    res = allocate_work(size);
  }
  return res;
}

typedef Stack<oop, mtGC> EpsilonMarkStack;

void EpsilonHeap::process_all_roots(OopClosure* oops) {
  // Need to adapt passed closure for some root types
  CLDToOopClosure clds(oops, ClassLoaderData::_claim_none);
  MarkingCodeBlobClosure blobs(oops, CodeBlobToOopClosure::FixRelocations);

  // Need to tell runtime we are about to walk the roots with 1 thread
  StrongRootsScope scope(1);

  // Need locks to walk some roots
  MutexLockerEx lock_cc(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  MutexLockerEx lock_cldg(ClassLoaderDataGraph_lock);

  // Walk all these different parts of runtime roots
  CodeCache::blobs_do(&blobs);
  ClassLoaderDataGraph::cld_do(&clds);
  Universe::oops_do(oops);
  Management::oops_do(oops);
  JvmtiExport::oops_do(oops);
  JNIHandles::oops_do(oops);
  WeakProcessor::oops_do(oops);
  ObjectSynchronizer::oops_do(oops);
  SystemDictionary::oops_do(oops);
  StringTable::oops_do(oops);
  Threads::possibly_parallel_oops_do(false, oops, &blobs);
}
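
// Note: the walk above is intentionally single-threaded (StrongRootsScope(1)), in
// line with the "as trivial as possible" goal; a production collector would spread
// the root set over a work gang.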

// Walk the marking bitmap and call object closure on every marked object.
void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
  HeapWord* limit = _space->top();
  HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
  while (addr < limit) {
    oop obj = oop(addr);
    assert(_bitmap.is_marked(obj), "sanity");
    cl->do_object(obj);
    // Step off the current object start, then jump to the next marked word.
    addr += 1;
    if (addr < limit) {
      addr = _bitmap.get_next_marked_addr(addr, limit);
    }
  }
}

class EpsilonScanOopClosure : public BasicOopIterateClosure {
private:
  EpsilonMarkStack* _stack;
  MarkBitMap* _bitmap;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_bitmap->par_mark(obj)) {
        _stack->push(obj);
      }
    }
  }

public:
  EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
                        _stack(stack), _bitmap(bitmap) {}

  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
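
// The bitmap doubles as the "visited" set for marking: par_mark() is an atomic
// test-and-set of the mark bit, so each object is pushed onto the mark stack at
// most once, no matter how many references point to it.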

class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
private:
  PreservedMarks* _preserved;
  HeapWord* _compact_point;

public:
  EpsilonCalcNewLocationObjectClosure(PreservedMarks* preserved, HeapWord *bottom) :
                                    _preserved(preserved), _compact_point(bottom) {}

  void do_object(oop obj) {
    if ((HeapWord*)obj != _compact_point) {
      markOop mark = obj->mark_raw();
      if (mark->must_be_preserved(obj)) {
        _preserved->push(obj, mark);
      }
      obj->forward_to(oop(_compact_point));
    }
    _compact_point += obj->size();
  }

  HeapWord* compact_point() {
    return _compact_point;
  }
};
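
// Worked example with a hypothetical layout: live objects A (3 words, at bottom),
// B (4 words, at bottom+5) and C (2 words, at bottom+12) yield: A stays put (it is
// already at the compact point), B forwards to bottom+3, C forwards to bottom+7,
// and the final compact point (the new top) is bottom+9.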

class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
private:
  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (obj->is_forwarded()) {
        oop fwd = obj->forwardee();
        RawAccess<>::oop_store(p, fwd);
      }
    }
  }

public:
  EpsilonAdjustPointersOopClosure() {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    EpsilonAdjustPointersOopClosure cl;
    obj->oop_iterate(&cl);
  }
};

class EpsilonMoveObjects : public ObjectClosure {
public:
  void do_object(oop obj) {
    if (obj->is_forwarded()) {
      oop fwd = obj->forwardee();
      Copy::aligned_conjoint_words((HeapWord*) obj, (HeapWord*) fwd, obj->size());
      fwd->init_mark_raw();
    }
  }
};
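
// Sliding moves are safe to do in one ascending pass: every forwardee is at or
// below its source, so a copy can only overwrite dead space or objects that were
// already moved, never an object still waiting to move; the potentially
// self-overlapping copy is what the conjoint variant handles.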

void EpsilonHeap::entry_collect(GCCause::Cause cause) {
  GCIdMark mark;
  GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);

  {
    GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);

    // Strictly speaking, we do not need a parsable heap for this algorithm,
    // but we want threads to give up their TLABs.
    ensure_parsability(true);

    // Tell various parts of runtime we are doing GC.
    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();
    DerivedPointerTable::clear();
    DerivedPointerTable::set_active(false);
  }

  {
    GCTraceTime(Info, gc) time("Step 1: Clear bitmap", NULL);

    // Clear bitmap in preparation for marking. Do this in a separate step
    // to show the heap-size-dependent cost of bitmap manipulations.
    _bitmap.clear_range_large(_space->used_region());
  }

  {
    GCTraceTime(Info, gc) time("Step 2: Mark", NULL);

    // Marking stack and the closure that does most of the work.
    // The closure scans the outgoing references, marks them in the bitmap,
    // and pushes newly-marked objects onto the stack for further processing.
    EpsilonMarkStack stack;
    EpsilonScanOopClosure cl(&stack, &_bitmap);

    // Seed the marking with roots.
    process_all_roots(&cl);

    // Scan the rest of the heap until we run out of objects.
    // Termination is guaranteed, because all reachable objects would
    // be marked eventually.
    while (!stack.is_empty()) {
      oop obj = stack.pop();
      obj->oop_iterate(&cl);
    }
  }
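
  // Note: this prototype does no reference processing. Weak roots were handed to
  // the same marking closure in process_all_roots(), and java.lang.ref referents
  // are effectively treated as strong, so nothing gets cleared by this GC.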

  // We are going to store forwarding information (where the new copy resides)
  // in mark words. Some of those mark words need to be carefully preserved.
  // This is a utility that maintains the list of those special mark words.
  PreservedMarks preserved_marks;

  // New top of the allocated space.
  HeapWord* new_top;

  {
    GCTraceTime(Info, gc) time("Step 3: Calculate new locations", NULL);

    // Walk all alive objects, compute their new addresses and store those addresses
    // in mark words. Optionally preserve some marks.
    EpsilonCalcNewLocationObjectClosure cl(&preserved_marks, _space->bottom());
    walk_bitmap(&cl);

    // After addresses are calculated, we know the new top for the allocated space.
    // We cannot set it just yet, because some asserts check that objects are "in heap"
    // based on current "top".
    new_top = cl.compact_point();
  }
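
  // Since compaction only slides objects towards the bottom, new_top can never be
  // above the current top, so delaying the set_top() update until Step 5 is safe.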

  {
    GCTraceTime(Info, gc) time("Step 4: Adjust pointers", NULL);

    // Walk all alive objects _and their reference fields_, and put "new addresses"
    // there. We know the new addresses from the forwarding data in mark words.
    // Take care of the heap objects first.
    EpsilonAdjustPointersObjectClosure cl;
    walk_bitmap(&cl);

    // Now do the same, but for all VM roots, which reference the objects on
    // their own: their references should also be updated.
    EpsilonAdjustPointersOopClosure cli;
    process_all_roots(&cli);

    // Finally, make sure preserved marks know the objects are about to move.
    preserved_marks.adjust_during_full_gc();
  }

  {
    GCTraceTime(Info, gc) time("Step 5: Move objects", NULL);

    // Move all alive objects to their new locations. All the references are already
    // adjusted at the previous step.
    EpsilonMoveObjects cl;
    walk_bitmap(&cl);

    // Now that all objects are at their new locations, we can retract the "top"
    // of the allocation space to the end of the compacted prefix.
    _space->set_top(new_top);
  }

  {
    GCTraceTime(Info, gc) time("Step 6: Epilogue", NULL);

    // Restore all special mark words.
    preserved_marks.restore();

    // Tell the rest of runtime we have finished the GC.
    DerivedPointerTable::update_pointers();
    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();
  }
}