/*
 * Copyright (c) 2014, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"

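// Barrier set installed for the duration of the full GC pause. Phase 2 reuses the
// Brooks pointer slot to record each object's compaction target, so the regular
// resolving read barrier would follow a not-yet-valid forwarding pointer; instead,
// reads simply return the object itself (objects are not moved until phase 4).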
class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet {
public:
  ShenandoahMarkCompactBarrierSet(ShenandoahHeap* heap) : ShenandoahBarrierSet(heap) {
  }
  oop read_barrier(oop src) {
    return src;
  }
#ifdef ASSERT
  bool is_safe(oop o) {
    if (o == NULL) return true;
    if (! oopDesc::unsafe_equals(o, read_barrier(o))) {
      return false;
    }
    return true;
  }
  bool is_safe(narrowOop o) {
    oop obj = oopDesc::decode_heap_oop(o);
    return is_safe(obj);
  }
#endif
};

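// Resets per-region marking state before the full GC marking cycle: moves the
// next-top-at-mark-start pointer up to the current top, clears live data counts,
// and resets the concurrent iteration safe limit.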
class ClearInCollectionSetHeapRegionClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
public:

  ClearInCollectionSetHeapRegionClosure() : _heap(ShenandoahHeap::heap()) {
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    _heap->set_next_top_at_mark_start(r->bottom(), r->top());
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
    return false;
  }
};

STWGCTimer* ShenandoahMarkCompact::_gc_timer = NULL;

void ShenandoahMarkCompact::initialize() {
  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

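// Entry point for the stop-the-world full GC. It cancels any concurrent mark or
// evacuation that is in progress, resets marking state, and then runs the four
// classic mark-compact phases under a single pause:
//   1. mark all live objects
//   2. compute compaction target addresses (stored in the Brooks pointer slot)
//   3. adjust all heap and root references to the target addresses
//   4. slide the objects to their new locations and reset region metadata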
void ShenandoahMarkCompact::do_mark_compact(GCCause::Cause gc_cause) {

  ShenandoahHeap* _heap = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = _heap->shenandoahPolicy();

  _gc_timer->register_gc_start();

  _heap->set_full_gc_in_progress(true);

  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  IsGCActiveMark is_active;

  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc);

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps);
  _heap->pre_full_gc_dump(_gc_timer);
  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps);

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_prepare);

  // Full GC is supposed to recover from any GC state:

  // a. Cancel concurrent mark, if in progress
  if (_heap->concurrent_mark_in_progress()) {
    _heap->concurrentMark()->cancel();
    _heap->stop_concurrent_marking();
  }
  assert(!_heap->concurrent_mark_in_progress(), "sanity");

  // b. Cancel evacuation, if in progress
  if (_heap->is_evacuation_in_progress()) {
    _heap->set_evacuation_in_progress_at_safepoint(false);
  }
  assert(!_heap->is_evacuation_in_progress(), "sanity");

  // c. Reset the bitmaps for new marking
  _heap->reset_next_mark_bitmap(_heap->workers());
  assert(_heap->is_next_bitmap_clear(), "sanity");

  ClearInCollectionSetHeapRegionClosure cl;
  _heap->heap_region_iterate(&cl, false, false);

  /*
  if (ShenandoahVerify) {
    // Full GC should only be called between regular concurrent cycles, therefore
    // those verifications should be valid.
    _heap->verify_heap_after_evacuation();
    _heap->verify_heap_after_update_refs();
  }
  */

  BarrierSet* old_bs = oopDesc::bs();
  ShenandoahMarkCompactBarrierSet bs(_heap);
  oopDesc::set_bs(&bs);

  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_prepare);

  {
    GCTraceTime(Info, gc) time("Pause Full", _gc_timer, gc_cause, true);

    if (UseTLAB) {
      _heap->ensure_parsability(true);
    }

    CodeCache::gc_prologue();

    // We should save the marks of the currently locked biased monitors.
    // The marking doesn't preserve the marks of biased objects.
    //BiasedLocking::preserve_marks();

    _heap->set_need_update_refs(true);
    WorkGang* workers = _heap->workers();

    // Setup workers for phase 1
    {
      uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_init_marking(
        workers->active_workers(), Threads::number_of_non_daemon_threads());
      workers->update_active_workers(nworkers);
      ShenandoahWorkerScope scope(workers, nworkers);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_mark);
      phase1_mark_heap();
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_mark);
    }

    // Setup workers for the rest
    {
      uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(
        workers->active_workers(), Threads::number_of_non_daemon_threads());

      ShenandoahWorkerScope scope(workers, nworkers);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_calculate_addresses);
      ShenandoahHeapRegionSet* copy_queues[_heap->max_workers()];
      phase2_calculate_target_addresses(copy_queues);
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_calculate_addresses);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_adjust_pointers);
      phase3_update_references();
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_adjust_pointers);

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_copy_objects);
      phase4_compact_objects(copy_queues);
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_copy_objects);

      CodeCache::gc_epilogue();
      JvmtiExport::gc_epilogue();
    }

    // refs processing: clean slate
    // rp.enqueue_discovered_references();

    if (ShenandoahVerify) {
      _heap->verify_heap_after_evacuation();
    }

    _heap->set_bytes_allocated_since_cm(0);

    _heap->set_need_update_refs(false);

    _heap->set_full_gc_in_progress(false);
  }

  _gc_timer->register_gc_end();

  policy->record_full_gc();

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps);
  _heap->post_full_gc_dump(_gc_timer);
  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps);

  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc);

  oopDesc::set_bs(old_bs);

  if (UseShenandoahMatrix) {
    if (PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      _heap->connection_matrix()->print_on(log);
    }
  }
}

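// Debug-only closures that verify the heap state right after full GC marking:
// every reachable oop must be marked in the complete bitmap and must not carry
// a non-trivial forwarding pointer yet.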
#ifdef ASSERT
class VerifyNotForwardedPointersClosure : public MetadataAwareOopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
             "expect forwarded oop");
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      if (! heap->is_marked_complete(obj)) {
        tty->print_cr("ref region humongous? %s", BOOL_TO_STR(heap->heap_region_containing(p)->is_humongous()));
      }
      assert(heap->is_marked_complete(obj), "must be marked");
      assert(! heap->allocated_after_complete_mark_start((HeapWord*) obj), "must be truly marked");
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahMCVerifyAfterMarkingObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    assert(heap->is_marked_complete(p), "must be marked");
    assert(! heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    VerifyNotForwardedPointersClosure cl;
    p->oop_iterate(&cl);
  }
};

class ShenandoahMCVerifyAfterMarkingRegionClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahMCVerifyAfterMarkingObjectClosure cl;
    if (! r->is_humongous_continuation()) {
      ShenandoahHeap::heap()->marked_object_iterate(r, &cl);
    }
    return false;
  }
};

#endif

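// Phase 1: marking. Reuses the concurrent marking machinery, but runs it to
// completion inside the pause, with reference processing and class unloading
// enabled. The next bitmap is populated and then swapped to become the complete
// bitmap that the later phases consume.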
void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahHeap* _heap = ShenandoahHeap::heap();

  ShenandoahConcurrentMark* cm = _heap->concurrentMark();

  cm->set_process_references(true);
  cm->set_unload_classes(true);

  ReferenceProcessor* rp = _heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
  rp->set_active_mt_degree(_heap->workers()->active_workers());

  COMPILER2_PRESENT(DerivedPointerTable::clear());
  cm->update_roots();
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  cm->mark_roots();
  cm->shared_finish_mark_from_roots(/* full_gc = */ true);

  _heap->swap_mark_bitmaps();

  if (UseShenandoahMatrix) {
    if (PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      _heap->connection_matrix()->print_on(log);
    }
  }

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    _heap->verify_heap_reachable_at_safepoint();
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    //    Universe::heap()->prepare_for_verify();
    _heap->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    //    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
    _heap->verify(VerifyOption_G1UseMarkWord);
  }

#ifdef ASSERT
  ShenandoahMCVerifyAfterMarkingRegionClosure cl;
  _heap->heap_region_iterate(&cl);
#endif
}

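// Humongous regions are not compacted: if the humongous object starting in a
// region is not marked in the complete bitmap, the region is reclaimed up front;
// otherwise the object survives in place.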
class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! _heap->is_marked_complete(humongous_obj)) {
        _heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

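// Walks the live objects of a from-region and assigns each one a forward address
// in the current to-region, switching to the next to-region (or the from-region
// itself) when the object does not fit. The chosen address is stashed in the
// object's Brooks pointer slot; nothing is moved yet.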
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {

private:

  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _to_regions;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:

  ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _to_regions(to_regions),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {
  }

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  ShenandoahHeapRegion* to_region() const {
    return _to_region;
  }
  HeapWord* compact_point() const {
    return _compact_point;
  }
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked_complete(p), "must be marked");
    assert(! _heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    size_t size = p->size();
    size_t obj_size = size + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Object doesn't fit. Pick next to-region and start compacting there.
      _to_region->set_new_top(_compact_point);
      ShenandoahHeapRegion* new_to_region = _to_regions->current();
      _to_regions->next();
      if (new_to_region == NULL) {
        new_to_region = _from_region;
      }
      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
};

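// Parallel driver for phase 2. Workers claim from-regions (humongous and pinned
// regions are skipped), record them in their per-worker copy queue for phase 4,
// and run the closure above to compute forwarding addresses.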
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:

  ShenandoahHeapRegionSet** _copy_queues;
  ShenandoahHeapRegionSet* _from_regions;

  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* copy_queue) {
    ShenandoahHeapRegion* from_region = _from_regions->claim_next();
    while (from_region != NULL && (from_region->is_humongous() || from_region->is_pinned())) {
      from_region = _from_regions->claim_next();
    }
    if (from_region != NULL) {
      assert(copy_queue != NULL, "sanity");
      assert(! from_region->is_humongous(), "must not get humongous regions here");
      assert(! from_region->is_pinned(), "no pinned region in mark-compact");
      copy_queue->add_region(from_region);
    }
    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet* from_regions, ShenandoahHeapRegionSet** copy_queues) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _copy_queues(copy_queues), _from_regions(from_regions) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* copy_queue = _copy_queues[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(copy_queue);
    if (from_region == NULL) return;
    ShenandoahHeapRegionSet* to_regions = new ShenandoahHeapRegionSet(ShenandoahHeap::heap()->max_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(to_regions, from_region);
    while (from_region != NULL) {
      assert(from_region != NULL, "sanity");
      cl.set_from_region(from_region);
      heap->marked_object_iterate(from_region, &cl);
      if (from_region != cl.to_region()) {
        assert(from_region != NULL, "sanity");
        to_regions->add_region(from_region);
      }
      from_region = next_from_region(copy_queue);
    }
    assert(cl.to_region() != NULL, "should not happen");
    cl.to_region()->set_new_top(cl.compact_point());
    while (to_regions->count() > 0) {
      ShenandoahHeapRegion* r = to_regions->current();
      to_regions->next();
      if (r == NULL) {
        to_regions->print();
      }
      assert(r != NULL, "should not happen");
      r->set_new_top(r->bottom());
    }
    delete to_regions;
  }
};

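// Phase 2: compute the compaction target address for every live object.
// Unreachable humongous regions are reclaimed first, then the remaining regions
// are partitioned among the workers via per-worker copy queues.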
void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahMCReclaimHumongousRegionClosure cl;
  heap->heap_region_iterate(&cl);

  // Initialize copy queues.
  for (uint i = 0; i < heap->max_workers(); i++) {
    copy_queues[i] = new ShenandoahHeapRegionSet(heap->max_regions());
  }

  ShenandoahHeapRegionSet* from_regions = heap->regions();
  from_regions->clear_current_index();
  ShenandoahPrepareForCompactionTask prepare_task(from_regions, copy_queues);
  heap->workers()->run_task(&prepare_task);
}

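// Rewrites a single reference to point at the referent's forwarding address,
// which phase 2 left in the Brooks pointer slot. When the connection matrix is
// in use, the connection is re-established from the referring field's post-slide
// address (its current address minus the offset by which the holder will move).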
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;
  size_t _new_obj_offset;
public:

  ShenandoahAdjustPointersClosure() : _heap(ShenandoahHeap::heap()), _new_obj_offset(0) {
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_heap->is_marked_complete(obj), "must be marked");
      oop forw = oop(BrooksPointer::get_raw(obj));
      oopDesc::encode_store_heap_oop(p, forw);
      if (UseShenandoahMatrix) {
        if (_heap->is_in_reserved(p)) {
          assert(_heap->is_in_reserved(forw), "must be in heap");
          _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
        }
      }
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
  void set_new_obj_offset(size_t new_obj_offset) {
    _new_obj_offset = new_obj_offset;
  }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahAdjustPointersClosure _cl;
  ShenandoahHeap* _heap;
public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    HeapWord* forw = BrooksPointer::get_raw(p);
    _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
    p->oop_iterate(&_cl);
  }
};

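// Parallel task that applies the adjust-pointers closure to every live object
// in every region, skipping humongous continuations.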
class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
public:

  ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _regions(regions) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegion* r = _regions->claim_next();
    ShenandoahAdjustPointersObjectClosure obj_cl;
    while (r != NULL) {
      if (! r->is_humongous_continuation()) {
        heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions->claim_next();
    }
  }
};

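// Companion task that adjusts references held in GC roots, processed through
// ShenandoahRootProcessor::process_all_roots with dedicated CLD and code-blob
// closures.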
class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:

  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    CLDToOopClosure adjust_cld_closure(&cl, true);
    MarkingCodeBlobClosure adjust_code_closure(&cl,
                                               CodeBlobToOopClosure::FixRelocations);

    _rp->process_all_roots(&cl, &cl,
                           &adjust_cld_closure,
                           &adjust_code_closure, worker_id);
  }
};

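// Phase 3: adjust every reference in the roots and in the heap to the new
// object locations computed in phase 2. The connection matrix, if in use, is
// cleared here and rebuilt as references are visited.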
void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (UseShenandoahMatrix) {
    heap->connection_matrix()->clear_all();
  }

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ShenandoahRootProcessor rp(heap, nworkers);
    ShenandoahAdjustRootPointersTask task(&rp);
    workers->run_task(&task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

  ShenandoahHeapRegionSet* regions = heap->regions();
  regions->clear_current_index();
  ShenandoahAdjustPointersTask adjust_pointers_task(regions);
  workers->run_task(&adjust_pointers_task);
}

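// Phase 4 object closure: copies each live object to the target address stored
// in its Brooks pointer slot and re-initializes the Brooks pointer of the new
// copy.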
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    size_t size = p->size();
    HeapWord* compact_to = BrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    // new_obj->init_mark();
    BrooksPointer::initialize(new_obj);
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
  ShenandoahHeapRegionSet** _regions;
public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** regions) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _regions(regions) {
  }
  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* copy_queue = _regions[worker_id];
    copy_queue->clear_current_index();
    ShenandoahCompactObjectsClosure cl;
    ShenandoahHeapRegion* r = copy_queue->current();
    copy_queue->next();
    while (r != NULL) {
      assert(! r->is_humongous(), "must not get humongous regions here");
      heap->marked_object_iterate(r, &cl);
      r->set_top(r->new_top());
      r = copy_queue->current();
      copy_queue->next();
    }
  }
};

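// Post-compaction region fix-up: resets the complete-top-at-mark-start pointer,
// takes regions out of the collection set, recycles fully empty regions into the
// free set, and accumulates the total live size for heap accounting.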
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
  size_t _live;
  ShenandoahHeap* _heap;
public:

  ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
    _heap->clear_free_regions();
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());
    r->set_in_collection_set(false);
    if (r->is_humongous()) {
      _live += ShenandoahHeapRegion::RegionSizeBytes;
    } else {
      size_t live = r->used();
      if (live == 0) {
        r->recycle();
        _heap->add_free_region(r);
      }
      r->set_live_data(live);
      _live += live;
    }
    return false;
  }

  size_t get_live() { return _live; }
};

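// Phase 4: move the objects. Each worker drains the copy queue it built in
// phase 2 and slides the live objects within those regions; afterwards the
// collection set test, region metadata, and both mark bitmaps are reset for the
// next cycle, and the per-worker copy queues are released.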
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCompactObjectsTask compact_task(copy_queues);
  heap->workers()->run_task(&compact_task);

  heap->clear_cset_fast_test();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  heap->reset_complete_mark_bitmap(heap->workers());

  {
    ShenandoahHeap::ShenandoahHeapLock lock(heap);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);

    heap->set_used(post_compact.get_live());
  }

  heap->clear_cancelled_concgc();

  // Also clear the next bitmap in preparation for next marking.
  heap->reset_next_mark_bitmap(heap->workers());

  for (uint i = 0; i < heap->max_workers(); i++) {
    delete copy_queues[i];
  }
}