/*
 * Copyright (c) 2014, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

  26 #include "classfile/javaClasses.inline.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shenandoah/brooksPointer.hpp"
  30 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  31 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  34 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  35 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  36 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  40 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  41 #include "gc/shenandoah/shenandoahUtils.hpp"
  42 #include "gc/shenandoah/shenandoahVerifier.hpp"
  43 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  44 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/biasedLocking.hpp"
  47 #include "runtime/thread.hpp"
  48 #include "utilities/copy.hpp"
  49 #include "utilities/growableArray.hpp"
  50 #include "gc/shared/taskqueue.inline.hpp"
  51 #include "gc/shared/workgroup.hpp"
  52 
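// Shenandoah full GC is a stop-the-world sliding mark-compact, run in four
// phases (see do_it() below):
//
//   Phase 1: mark all live objects           (phase1_mark_heap)
//   Phase 2: compute new object addresses    (phase2_calculate_target_addresses)
//   Phase 3: adjust all heap/root references (phase3_update_references)
//   Phase 4: slide objects to new locations  (phase4_compact_objects)
//
// New addresses are staged in the Brooks forwarding pointer word that precedes
// every object, so no separate forwarding table is needed:
//
//   | fwdptr | object header | fields ... |
//            ^
//            +-- the oop points here; the fwdptr sits BrooksPointer::word_size()
//                words before it
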
class ShenandoahClearRegionStatusClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahClearRegionStatusClosure() : _heap(ShenandoahHeap::heap()) {}

  bool heap_region_do(ShenandoahHeapRegion* r) {
    _heap->set_next_top_at_mark_start(r->bottom(), r->top());
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
    return false;
  }
};

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  bool heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());

    // Record current region occupancy: this tells the rest of the Full GC
    // code which regions are free (empty).
    r->set_new_top(r->top());
    return false;
  }
};

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    if (ShenandoahVerify) {
      heap->verifier()->verify_before_fullgc();
    }

    heap->set_full_gc_in_progress(true);

    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

    {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
      heap->pre_full_gc_dump(_gc_timer);
    }

    {
      ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
      // Full GC is supposed to recover from any GC state:

      // a1. Cancel evacuation, if in progress
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress(false);
      }
      assert(!heap->is_evacuation_in_progress(), "sanity");

      // a2. Cancel update-refs, if in progress
      if (heap->is_update_refs_in_progress()) {
        heap->set_update_refs_in_progress(false);
      }
      assert(!heap->is_update_refs_in_progress(), "sanity");

      // a3. Cancel concurrent traversal GC, if in progress
      if (heap->is_concurrent_traversal_in_progress()) {
        heap->traversal_gc()->reset();
        heap->set_concurrent_traversal_in_progress(false);
      }

      // b. Cancel concurrent mark, if in progress
      if (heap->is_concurrent_mark_in_progress()) {
        heap->concurrentMark()->cancel();
        heap->stop_concurrent_marking();
      }
      assert(!heap->is_concurrent_mark_in_progress(), "sanity");

      // c. Reset the bitmaps for new marking
      heap->reset_next_mark_bitmap();
      assert(heap->is_next_bitmap_clear(), "sanity");

      // d. Abandon reference discovery and clear all discovered references.
      ReferenceProcessor* rp = heap->ref_processor();
      rp->disable_discovery();
      rp->abandon_partial_discovery();
      rp->verify_no_references_recorded();

      {
        ShenandoahHeapLocker lock(heap->lock());

        // e. Make sure all regions are active. This is needed because we are
        // potentially sliding the data through them.
        ShenandoahEnsureHeapActiveClosure ecl;
        heap->heap_region_iterate(&ecl, false, false);

        // f. Clear region statuses, including collection set status
        ShenandoahClearRegionStatusClosure cl;
        heap->heap_region_iterate(&cl, false, false);
      }
    }

    {
      if (UseTLAB) {
        heap->make_tlabs_parsable(true);
      }

      CodeCache::gc_prologue();

      // TODO: We don't necessarily need to update refs. We might want to clean
      // up managing has_forwarded_objects when diving into degen/full-gc.
      heap->set_has_forwarded_objects(true);

      OrderAccess::fence();

      phase1_mark_heap();

      // Prevent read-barrier from kicking in while adjusting pointers in phase3.
      heap->set_has_forwarded_objects(false);

      heap->set_full_gc_move_in_progress(true);

      // Set up workers for the rest
      {
        OrderAccess::fence();

        // Initialize worker slices
        ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
        for (uint i = 0; i < heap->max_workers(); i++) {
          worker_slices[i] = new ShenandoahHeapRegionSet();
        }

        phase2_calculate_target_addresses(worker_slices);

        OrderAccess::fence();

        phase3_update_references();

        phase4_compact_objects(worker_slices);

        // Free worker slices
        for (uint i = 0; i < heap->max_workers(); i++) {
          delete worker_slices[i];
        }
        FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

        CodeCache::gc_epilogue();
        JvmtiExport::gc_epilogue();
      }

      heap->set_full_gc_move_in_progress(false);
      heap->set_full_gc_in_progress(false);

      if (ShenandoahVerify) {
        heap->verifier()->verify_after_fullgc();
      }
    }

    {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
      heap->post_full_gc_dump(_gc_timer);
    }

    if (UseTLAB) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
      heap->resize_all_tlabs();
    }
  }

  if (UseShenandoahMatrix && PrintShenandoahMatrix) {
    LogTarget(Info, gc) lt;
    LogStream ls(lt);
    heap->connection_matrix()->print_on(&ls);
  }
}

void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahConcurrentMark* cm = heap->concurrentMark();

  // Do not trust heuristics, because this can be our last-resort collection.
  // Process references and unload classes unless each was explicitly disabled.
  heap->set_process_references(ShenandoahRefProcFrequency != 0);
  heap->set_unload_classes(ShenandoahUnloadClassesFrequency != 0);

  ReferenceProcessor* rp = heap->ref_processor();
  // Enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->shared_finish_mark_from_roots(/* full_gc = */ true);

  heap->swap_mark_bitmaps();

  if (UseShenandoahMatrix && PrintShenandoahMatrix) {
    LogTarget(Info, gc) lt;
    LogStream ls(lt);
    heap->connection_matrix()->print_on(&ls);
  }
}

class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {}

  bool heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (!_heap->is_marked_complete(humongous_obj)) {
        _heap->trash_humongous_region_at(r);
      }
    }
    return false;
  }
};

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap*          const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion*          _to_region;
  ShenandoahHeapRegion*          _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked_complete(p), "must be marked");
    assert(!_heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");

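    // The object's total footprint includes the Brooks fwdptr word that precedes it.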
    size_t obj_size = p->size() + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  ShenandoahHeap*           const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  ShenandoahRegionIterator        _heap_regions;

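  // Claim the next movable region from the global iterator, skipping humongous
  // and non-movable (e.g. pinned) regions; humongous moves are handled
  // separately by calculate_target_humongous_objects(). Claimed regions are
  // recorded in this worker's slice, so that phase 4 compacts exactly the
  // regions this worker prepared.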
  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      _heap->marked_object_iterate(from_region, &cl);

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region
  // is detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions() - 1; c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->region_number();
      continue;
    }

    if (r->is_humongous_start() && r->is_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom() + BrooksPointer::word_size());
      size_t words_size = old_obj->size() + BrooksPointer::word_size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

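      // Candidate target: the num_regions-sized block ending at to_end, the
      // bottom of the current compaction window.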
      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->region_number()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        BrooksPointer::set_raw(old_obj, heap->get_region(start)->bottom() + BrooksPointer::word_size());
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->region_number();
    to_end = r->region_number();
  }
}

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    {
      ShenandoahHeapLocker lock(heap->lock());

      ShenandoahMCReclaimHumongousRegionClosure cl;
      heap->heap_region_iterate(&cl);

      // After some humongous regions were reclaimed, we need to ensure their
      // backing storage is active. This is needed because we are potentially
      // sliding the data through them.
      ShenandoahEnsureHeapActiveClosure ecl;
      heap->heap_region_iterate(&ecl, false, false);
    }

    // Compute the new addresses for regular objects
    ShenandoahPrepareForCompactionTask prepare_task(worker_slices);
    heap->workers()->run_task(&prepare_task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _new_obj_offset;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_heap->is_marked_complete(obj), "must be marked");
      oop forw = oop(BrooksPointer::get_raw(obj));
      oopDesc::encode_store_heap_oop(p, forw);
      if (UseShenandoahMatrix) {
        if (_heap->is_in_reserved(p)) {
          assert(_heap->is_in_reserved(forw), "must be in heap");
          assert(_new_obj_offset != SIZE_MAX, "should be set");
          // We're moving a to a', which points to b, about to be moved to b'.
          // We already know b' from the fwd pointer of b.
          // In the object closure, we see a, and we know a' (by looking at its
          // fwd ptr). We store the offset in the OopClosure, which is going
          // to visit all of a's fields, and then, when we see each field, we
          // subtract the offset from each field address to get the final ptr.
          _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
        }
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() : _heap(ShenandoahHeap::heap()), _new_obj_offset(SIZE_MAX) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }

  void set_new_obj_offset(size_t new_obj_offset) {
    _new_obj_offset = new_obj_offset;
  }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
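    // Record how far this object is about to move: the oop closure uses this
    // offset to attribute connection-matrix updates to the object's new address.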
    HeapWord* forw = BrooksPointer::get_raw(p);
    _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap*          const _heap;
  ShenandoahRegionIterator       _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    CLDToOopClosure adjust_cld_closure(&cl, true);
    MarkingCodeBlobClosure adjust_code_closure(&cl,
                                               CodeBlobToOopClosure::FixRelocations);

    _rp->process_all_roots(&cl, &cl,
                           &adjust_cld_closure,
                           &adjust_code_closure, NULL, worker_id);
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (UseShenandoahMatrix) {
    heap->connection_matrix()->clear_all();
  }

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
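    // Compiled (C2/JVMCI) frames may hold derived pointers, which must be
    // re-derived once their base oops are adjusted: the DerivedPointerTable
    // collects them during root processing and fixes them up afterwards.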
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
    ShenandoahAdjustRootPointersTask task(&rp);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint            const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    size_t size = (size_t)p->size();
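    // Phase 2 stashed the target address in the fwdptr slot; copy the object
    // there, then let BrooksPointer::initialize() make the new copy's fwdptr
    // self-referencing again.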
    HeapWord* compact_to = BrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    BrooksPointer::initialize(new_obj);
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      _heap->marked_object_iterate(r, &cl);
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata_to_shared();
    _live += live;
    return false;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on the fwdptrs recorded for their objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions() - 1; c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom() + BrooksPointer::word_size());
      size_t words_size = old_obj->size() + BrooksPointer::word_size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->region_number();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(BrooksPointer::get_raw(old_obj));
      size_t new_end   = new_start + num_regions - 1;

      if (old_start == new_start) {
        // No need to move the object, it stays at the same slot
        continue;
      }

      assert(r->is_move_allowed(), "should be movable");

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words() * num_regions);

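      // Re-initialize the fwdptr of the moved humongous object, so that it
      // points to the object itself again.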
      oop new_obj = oop(heap->get_region(new_start)->bottom() + BrooksPointer::word_size());
      BrooksPointer::initialize(new_obj);

      {
        ShenandoahHeapLocker lock(heap->lock());

        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata_to_shared();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over
// those is using a valid marking bitmap and a valid TAMS pointer. This class only
// resets marking bitmaps for unpinned regions, and later we only reset TAMS for
// unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned()) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_complete_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  ShenandoahMCResetCompleteBitmapTask task;
  heap->workers()->run_task(&task);

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahHeapLocker lock(heap->lock());
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_concgc();

  // Also clear the next bitmap in preparation for next marking.
  heap->reset_next_mark_bitmap();
}