/*
 * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "gc/shared/workgroup.hpp"

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

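// Full GC entry point. This is a stop-the-world sliding mark-compact in four phases:
//   Phase 1: mark all live objects;
//   Phase 2: calculate target (forwarding) addresses for live objects;
//   Phase 3: adjust all references to point at the target addresses;
//   Phase 4: copy objects to their new locations and rebuild region states.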
void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // a3. Cancel concurrent traversal GC, if in progress
    if (heap->is_concurrent_traversal_in_progress()) {
      heap->traversal_gc()->reset();
      heap->set_concurrent_traversal_in_progress(false);
    }

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->concurrent_mark()->cancel();
      heap->stop_concurrent_marking();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // d. Abandon reference discovery and clear all discovered references.
    ReferenceProcessor* rp = heap->ref_processor();
    rp->disable_discovery();
    rp->abandon_partial_discovery();
    rp->verify_no_references_recorded();

    // e. Set the has-forwarded-objects bit back, in case some steps above dropped it.
    heap->set_has_forwarded_objects(has_forwarded_objects);
  }

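  // Make the heap parsable, so that the object iterations in the phases below can
  // walk regions safely.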
  heap->make_parsable(true);

  CodeCache::gc_prologue();

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents the read barrier from kicking in while adjusting pointers in phase 3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have completed.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->post_full_gc_dump(_gc_timer);
  }
}

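// Resets per-region marking state before full marking: captures top-at-mark-start (TAMS)
// at the current top and clears the live data counter, so marking starts from a clean
// slate in every region.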
class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
  }
};

void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  ShenandoahConcurrentMark* cm = heap->concurrent_mark();

  heap->set_process_references(heap->heuristics()->can_process_references());
  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // forcefully purge all soft references
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->finish_mark_from_roots(/* full_gc = */ true);

  heap->mark_complete_marking_context();
}

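// Phase 2 helper: computes forwarding addresses for regular objects via sliding compaction.
// Live objects of the current from-region are assigned consecutive addresses starting at the
// compact point of the current to-region; when an object does not fit, compaction continues
// in the next known-empty region (or within the from-region itself if none is available).
// The target address is recorded in the object's Brooks pointer, for use by phases 3 and 4.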
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");

    size_t obj_size = p->size() + ShenandoahBrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    ShenandoahBrooksPointer::set_raw(p, _compact_point + ShenandoahBrooksPointer::word_size());
    _compact_point += obj_size;
  }
};

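// Parallel driver for the closure above: each worker claims from-regions from the shared
// region iterator into its own slice and computes forwarding addresses only within that
// slice. Fully evacuated regions are remembered for reuse as compaction targets; whatever
// remains unused at the end is reset to empty (new_top == bottom).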
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  ShenandoahRegionIterator        _heap_regions;

  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions() - 1; c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->region_number();
      continue;
    }

    if (r->is_humongous_start() && r->is_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
      size_t words_size = old_obj->size() + ShenandoahBrooksPointer::word_size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->region_number()) {
        // Fits into current window, and the move is non-trivial. Record the move, then continue the scan.
        ShenandoahBrooksPointer::set_raw(old_obj, heap->get_region(start)->bottom() + ShenandoahBrooksPointer::word_size());
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->region_number();
    to_end = r->region_number();
  }
}

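// Brings every region into a committed, regular state (recycling trash, demoting cset
// regions, committing empty-uncommitted ones), and records current occupancy in new_top,
// which tells the rest of the Full GC which regions are free.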
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());

    // Record current region occupancy: this communicates that empty regions are free
    // to the rest of the Full GC code.
    r->set_new_top(r->top());
  }
};

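// Trashes regions that contain no marked objects (immediate garbage), including humongous
// regions whose start object is unreachable, so they can be reclaimed before new addresses
// are computed.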
class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->region_number());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->region_number());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
    ShenandoahPrepareForCompactionTask prepare_task(worker_slices);
    heap->workers()->run_task(&prepare_task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

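// Phase 3 helper: rewrites a single reference to point at the target location of the
// referent, as recorded in its Brooks pointer during phase 2.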
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      oop forw = oop(ShenandoahBrooksPointer::get_raw(obj));
      RawAccess<IS_NOT_NULL>::oop_store(p, forw);
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

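// Walks all live objects in all regions and applies the pointer adjustment above to
// their reference fields.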
class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

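// Applies the same pointer adjustment to all roots (CLDs, code cache, and the other
// root sets handled by the root processor).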
class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    CLDToOopClosure adjust_cld_closure(&cl, ClassLoaderData::_claim_strong);
    MarkingCodeBlobClosure adjust_code_closure(&cl,
                                             CodeBlobToOopClosure::FixRelocations);

    _rp->process_all_roots(&cl, &cl,
                           &adjust_cld_closure,
                           &adjust_code_closure, NULL, worker_id);
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
    ShenandoahAdjustRootPointersTask task(&rp);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

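// Phase 4 helper: copies each live object to the forwarding address computed in phase 2
// and re-initializes the Brooks pointer at the new location. Copy::aligned_conjoint_words
// is used because source and destination may overlap when compacting within one region.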
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    HeapWord* compact_to = ShenandoahBrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    ShenandoahBrooksPointer::initialize(new_obj);
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

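// Post-compaction cleanup: resets TAMS for unpinned regions, fixes up region states
// (empty regions that received data become regular, regular regions that became empty
// are trashed and recycled), and accumulates the total live size for heap->set_used().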
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata_to_shared();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions() - 1; c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom() + ShenandoahBrooksPointer::word_size());
      size_t words_size = old_obj->size() + ShenandoahBrooksPointer::word_size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->region_number();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(ShenandoahBrooksPointer::get_raw(old_obj));
      size_t new_end   = new_start + num_regions - 1;

      if (old_start == new_start) {
        // No need to move the object, it stays at the same slot
        continue;
      }

      assert (r->is_move_allowed(), "should be movable");

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words()*num_regions);

      oop new_obj = oop(heap->get_region(new_start)->bottom() + ShenandoahBrooksPointer::word_size());
      ShenandoahBrooksPointer::initialize(new_obj);

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata_to_shared();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is
// using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}