/*
 * Copyright (c) 2014, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"

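// Degenerate barrier set installed while full GC is running: read_barrier() returns
// the object unchanged, because mark-compact temporarily reuses the Brooks pointer
// word to hold the object's compaction target address, so it must not be followed
// on ordinary accesses.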
class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet {
public:
  ShenandoahMarkCompactBarrierSet(ShenandoahHeap* heap) : ShenandoahBarrierSet(heap) {
  }
  oop read_barrier(oop src) {
    return src;
  }
#ifdef ASSERT
  bool is_safe(oop o) {
    if (o == NULL) return true;
    if (! oopDesc::unsafe_equals(o, read_barrier(o))) {
      return false;
    }
    return true;
  }
  bool is_safe(narrowOop o) {
    oop obj = oopDesc::decode_heap_oop(o);
    return is_safe(obj);
  }
#endif
};

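// Resets per-region marking state (next top-at-mark-start, live data, safe iteration
// limit) so that full GC can mark the heap from scratch.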
class ClearInCollectionSetHeapRegionClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
public:

  ClearInCollectionSetHeapRegionClosure() : _heap(ShenandoahHeap::heap()) {
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    _heap->set_next_top_at_mark_start(r->bottom(), r->top());
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
    return false;
  }
};

STWGCTimer* ShenandoahMarkCompact::_gc_timer = NULL;

void ShenandoahMarkCompact::initialize() {
  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void ShenandoahMarkCompact::do_mark_compact(GCCause::Cause gc_cause) {

  ShenandoahHeap* _heap = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = _heap->shenandoahPolicy();

  _gc_timer->register_gc_start();

  _heap->set_full_gc_in_progress(true);

  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  IsGCActiveMark is_active;

  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc);

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps);
  _heap->pre_full_gc_dump(_gc_timer);
  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps);

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_prepare);

  // Full GC is supposed to recover from any GC state:

  // a. Cancel concurrent mark, if in progress
  if (_heap->concurrent_mark_in_progress()) {
    _heap->concurrentMark()->cancel();
    _heap->stop_concurrent_marking();
  }
  assert(!_heap->concurrent_mark_in_progress(), "sanity");

  // b. Cancel evacuation, if in progress
  if (_heap->is_evacuation_in_progress()) {
    _heap->set_evacuation_in_progress_at_safepoint(false);
  }
  assert(!_heap->is_evacuation_in_progress(), "sanity");

  // c. Reset the bitmaps for new marking
  _heap->reset_next_mark_bitmap(_heap->workers());
  assert(_heap->is_next_bitmap_clear(), "sanity");

  ClearInCollectionSetHeapRegionClosure cl;
  _heap->heap_region_iterate(&cl, false, false);

  /*
  if (ShenandoahVerify) {
    // Full GC should only be called between regular concurrent cycles, therefore
    // those verifications should be valid.
    _heap->verify_heap_after_evacuation();
    _heap->verify_heap_after_update_refs();
  }
  */

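  // Install the no-op read barrier for the duration of the compaction: the Brooks
  // pointer is about to be repurposed as a forwarding pointer and must not be
  // resolved through. The previous barrier set is restored at the end of this method.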
  BarrierSet* old_bs = oopDesc::bs();
  ShenandoahMarkCompactBarrierSet bs(_heap);
  oopDesc::set_bs(&bs);

  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_prepare);

  {
    GCTraceTime(Info, gc) time("Pause Full", _gc_timer, gc_cause, true);

    if (UseTLAB) {
      _heap->ensure_parsability(true);
    }

    CodeCache::gc_prologue();

    // We should save the marks of the currently locked biased monitors.
    // The marking doesn't preserve the marks of biased objects.
    //BiasedLocking::preserve_marks();

    _heap->set_need_update_refs(true);
    WorkGang* workers = _heap->workers();

    // Set up workers for phase 1
    {
      uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_init_marking(
        workers->active_workers(), (uint) Threads::number_of_non_daemon_threads());
      workers->update_active_workers(nworkers);
      ShenandoahWorkerScope scope(workers, nworkers);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_mark);
      phase1_mark_heap();
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_mark);
    }

    // Set up workers for the rest
    {
      uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(
        workers->active_workers(), (uint)Threads::number_of_non_daemon_threads());
      ShenandoahWorkerScope scope(workers, nworkers);

      OrderAccess::fence();

      ShenandoahHeapRegionSet** copy_queues = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, _heap->max_workers(), mtGC);

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_calculate_addresses);
      phase2_calculate_target_addresses(copy_queues);
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_calculate_addresses);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_adjust_pointers);
      phase3_update_references();
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_adjust_pointers);

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_copy_objects);
      phase4_compact_objects(copy_queues);
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_copy_objects);

      FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, copy_queues);

      CodeCache::gc_epilogue();
      JvmtiExport::gc_epilogue();
    }

    // refs processing: clean slate
    // rp.enqueue_discovered_references();

    if (ShenandoahVerify) {
      _heap->verify_heap_after_evacuation();
    }

    _heap->set_bytes_allocated_since_cm(0);

    _heap->set_need_update_refs(false);

    _heap->set_full_gc_in_progress(false);
  }

  _gc_timer->register_gc_end();

  policy->record_full_gc();

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps);
  _heap->post_full_gc_dump(_gc_timer);
  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps);

  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc);

  oopDesc::set_bs(old_bs);

  if (UseShenandoahMatrix) {
    if (PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      _heap->connection_matrix()->print_on(log);
    }
  }
}

#ifdef ASSERT
class VerifyNotForwardedPointersClosure : public MetadataAwareOopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
             "expect forwarded oop");
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      if (! heap->is_marked_complete(obj)) {
        tty->print_cr("ref region humongous? %s", BOOL_TO_STR(heap->heap_region_containing(p)->is_humongous()));
      }
      assert(heap->is_marked_complete(obj), "must be marked");
      assert(! heap->allocated_after_complete_mark_start((HeapWord*) obj), "must be truly marked");
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahMCVerifyAfterMarkingObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    assert(heap->is_marked_complete(p), "must be marked");
    assert(! heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    VerifyNotForwardedPointersClosure cl;
    p->oop_iterate(&cl);
  }
};

class ShenandoahMCVerifyAfterMarkingRegionClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahMCVerifyAfterMarkingObjectClosure cl;
    if (! r->is_humongous_continuation()) {
      ShenandoahHeap::heap()->marked_object_iterate(r, &cl);
    }
    return false;
  }
};

#endif

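// Phase 1: mark all live objects from the roots. This reuses the concurrent marking
// code at a safepoint, with reference processing and class unloading enabled, and
// then swaps the mark bitmaps so the rest of the full GC sees a complete marking.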
void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahHeap* _heap = ShenandoahHeap::heap();

  ShenandoahConcurrentMark* cm = _heap->concurrentMark();

  cm->set_process_references(true);
  cm->set_unload_classes(true);

  ReferenceProcessor* rp = _heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
  rp->set_active_mt_degree(_heap->workers()->active_workers());

  COMPILER2_PRESENT(DerivedPointerTable::clear());
  cm->update_roots();
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  cm->mark_roots();
  cm->shared_finish_mark_from_roots(/* full_gc = */ true);

  _heap->swap_mark_bitmaps();

  if (UseShenandoahMatrix) {
    if (PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      _heap->connection_matrix()->print_on(log);
    }
  }

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    _heap->verify_heap_reachable_at_safepoint();
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    //    Universe::heap()->prepare_for_verify();
    _heap->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    //    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
    _heap->verify(VerifyOption_G1UseMarkWord);
  }

#ifdef ASSERT
  ShenandoahMCVerifyAfterMarkingRegionClosure cl;
  _heap->heap_region_iterate(&cl);
#endif
}

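// Immediately reclaims humongous regions whose start object did not survive marking.
// Live humongous objects are never moved by full GC: the compaction tasks below skip
// humongous regions entirely.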
class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! _heap->is_marked_complete(humongous_obj)) {
        _heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

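// Computes the post-compaction address of every marked object in the current
// from-region: objects are slid into the current to-region, and when it fills up the
// next region from the to-region set is taken (falling back to the from-region
// itself). The chosen target address is recorded in the object's Brooks pointer.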
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {

private:

  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _to_regions;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:

  ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _to_regions(to_regions),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {
  }

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  ShenandoahHeapRegion* to_region() const {
    return _to_region;
  }
  HeapWord* compact_point() const {
    return _compact_point;
  }
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked_complete(p), "must be marked");
    assert(! _heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    size_t obj_size = p->size() + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Object doesn't fit. Pick next to-region and start compacting there.
      _to_region->set_new_top(_compact_point);
      ShenandoahHeapRegion* new_to_region = _to_regions->current();
      _to_regions->next();
      if (new_to_region == NULL) {
        new_to_region = _from_region;
      }
      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
};

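// Parallel part of phase 2: each worker claims from-regions (humongous and pinned
// regions are skipped), records them in its per-worker copy queue, and runs the
// closure above to compute forwarding addresses. From-regions that end up fully
// drained, and are not currently serving as the compaction target, are recycled as
// future to-regions; any left unused have their new top reset to bottom.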
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:

  ShenandoahHeapRegionSet** _copy_queues;
  ShenandoahHeapRegionSet* _from_regions;

  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* copy_queue) {
    ShenandoahHeapRegion* from_region = _from_regions->claim_next();
    while (from_region != NULL && (from_region->is_humongous() || from_region->is_pinned())) {
      from_region = _from_regions->claim_next();
    }
    if (from_region != NULL) {
      assert(copy_queue != NULL, "sanity");
      assert(! from_region->is_humongous(), "must not get humongous regions here");
      assert(! from_region->is_pinned(), "no pinned region in mark-compact");
      copy_queue->add_region(from_region);
    }
    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet* from_regions, ShenandoahHeapRegionSet** copy_queues) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _copy_queues(copy_queues), _from_regions(from_regions) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* copy_queue = _copy_queues[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(copy_queue);
    if (from_region == NULL) return;
    ShenandoahHeapRegionSet* to_regions = new ShenandoahHeapRegionSet(ShenandoahHeap::heap()->max_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(to_regions, from_region);
    while (from_region != NULL) {
      assert(from_region != NULL, "sanity");
      cl.set_from_region(from_region);
      heap->marked_object_iterate(from_region, &cl);
      if (from_region != cl.to_region()) {
        assert(from_region != NULL, "sanity");
        to_regions->add_region(from_region);
      }
      from_region = next_from_region(copy_queue);
    }
    assert(cl.to_region() != NULL, "should not happen");
    cl.to_region()->set_new_top(cl.compact_point());
    while (to_regions->count() > 0) {
      ShenandoahHeapRegion* r = to_regions->current();
      to_regions->next();
      if (r == NULL) {
        to_regions->print();
      }
      assert(r != NULL, "should not happen");
      r->set_new_top(r->bottom());
    }
    delete to_regions;
  }
};

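// Phase 2: reclaim regions of dead humongous objects, set up the per-worker copy
// queues, and compute a forwarding address for every live object, stored in its
// Brooks pointer.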
void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahMCReclaimHumongousRegionClosure cl;
  heap->heap_region_iterate(&cl);

  // Initialize copy queues.
  for (uint i = 0; i < heap->max_workers(); i++) {
    copy_queues[i] = new ShenandoahHeapRegionSet(heap->max_regions());
  }

  ShenandoahHeapRegionSet* from_regions = heap->regions();
  from_regions->clear_current_index();
  ShenandoahPrepareForCompactionTask prepare_task(from_regions, copy_queues);
  heap->workers()->run_task(&prepare_task);
}

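// Rewrites a reference to point at the referent's new location, read from the
// referent's Brooks pointer. With UseShenandoahMatrix, it also records the connection
// from the holder's future location, obtained by subtracting the holder's move offset
// from the field address.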
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;
  size_t _new_obj_offset;
public:

  ShenandoahAdjustPointersClosure() : _heap(ShenandoahHeap::heap()), _new_obj_offset(0) {
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_heap->is_marked_complete(obj), "must be marked");
      oop forw = oop(BrooksPointer::get_raw(obj));
      oopDesc::encode_store_heap_oop(p, forw);
      if (UseShenandoahMatrix) {
        if (_heap->is_in_reserved(p)) {
          assert(_heap->is_in_reserved(forw), "must be in heap");
          // We're moving a to a', which points to b, about to be moved to b'.
          // We already know b' from the fwd pointer of b.
          // In the object closure, we see a, and we know a' (by looking at its
          // fwd ptr). We store the offset in the OopClosure, which is going
          // to visit all of a's fields, and then, when we see each field, we
          // subtract the offset from each field address to get the final ptr.
          _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
        }
      }
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
  void set_new_obj_offset(size_t new_obj_offset) {
    _new_obj_offset = new_obj_offset;
  }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahAdjustPointersClosure _cl;
  ShenandoahHeap* _heap;
public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    HeapWord* forw = BrooksPointer::get_raw(p);
    _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
    p->oop_iterate(&_cl);
  }
};

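// Applies the adjust-pointers closure to all marked objects, region by region.
// Humongous continuation regions are skipped: the humongous object is visited from
// its start region.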
class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
public:

  ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _regions(regions) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegion* r = _regions->claim_next();
    ShenandoahAdjustPointersObjectClosure obj_cl;
    while (r != NULL) {
      if (! r->is_humongous_continuation()) {
        heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions->claim_next();
    }
  }
};

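// Adjusts references held in GC roots (including class loader data and the code
// cache) to the new object locations, in parallel.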
class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:

  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    CLDToOopClosure adjust_cld_closure(&cl, true);
    MarkingCodeBlobClosure adjust_code_closure(&cl,
                                               CodeBlobToOopClosure::FixRelocations);

    _rp->process_all_roots(&cl, &cl,
                           &adjust_cld_closure,
                           &adjust_code_closure, worker_id);
  }
};

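// Phase 3: walk all roots and all live heap objects and update every reference to
// the forwarding address computed in phase 2. The connection matrix, if in use, is
// cleared here and rebuilt as pointers are adjusted.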
void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (UseShenandoahMatrix) {
    heap->connection_matrix()->clear_all();
  }

  // Need cleared claim bits for the roots processing
  ClassLoaderDataGraph::clear_claimed_marks();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ShenandoahRootProcessor rp(heap, nworkers);
    ShenandoahAdjustRootPointersTask task(&rp);
    workers->run_task(&task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

  ShenandoahHeapRegionSet* regions = heap->regions();
  regions->clear_current_index();
  ShenandoahAdjustPointersTask adjust_pointers_task(regions);
  workers->run_task(&adjust_pointers_task);
}

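// Copies each marked object to its new location, taken from its Brooks pointer, and
// re-initializes the Brooks pointer of the moved object.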
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    size_t size = (size_t)p->size();
    HeapWord* compact_to = BrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    // new_obj->init_mark();
    BrooksPointer::initialize(new_obj);
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
  ShenandoahHeapRegionSet** _regions;
public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** regions) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _regions(regions) {
  }
  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* copy_queue = _regions[worker_id];
    copy_queue->clear_current_index();
    ShenandoahCompactObjectsClosure cl;
    ShenandoahHeapRegion* r = copy_queue->current();
    copy_queue->next();
    while (r != NULL) {
      assert(! r->is_humongous(), "must not get humongous regions here");
      heap->marked_object_iterate(r, &cl);
      r->set_top(r->new_top());
      r = copy_queue->current();
      copy_queue->next();
    }
  }
};

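// Rebuilds per-region state after the copy: resets the complete top-at-mark-start,
// clears collection-set membership, recycles completely empty regions into the free
// set, and accumulates the total live size, which becomes the heap's used size.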
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
  size_t _live;
  ShenandoahHeap* _heap;
public:

  ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
    _heap->clear_free_regions();
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());
    r->set_in_collection_set(false);
    if (r->is_humongous()) {
      _live += ShenandoahHeapRegion::region_size_bytes();
    } else {
      size_t live = r->used();
      if (live == 0) {
        r->recycle();
        _heap->add_free_region(r);
      }
      r->set_live_data(live);
      _live += live;
    }
    return false;
  }

  size_t get_live() { return _live; }

};

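// Phase 4: move objects to their new locations, then reset the collection set, the
// mark bitmaps and the per-region state, and recompute heap usage.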
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCompactObjectsTask compact_task(copy_queues);
  heap->workers()->run_task(&compact_task);

  heap->clear_cset_fast_test();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  heap->reset_complete_mark_bitmap(heap->workers());

  {
    ShenandoahHeap::ShenandoahHeapLock lock(heap);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);

    heap->set_used(post_compact.get_live());
  }

  heap->clear_cancelled_concgc();

  // Also clear the next bitmap in preparation for next marking.
  heap->reset_next_mark_bitmap(heap->workers());

  for (uint i = 0; i < heap->max_workers(); i++) {
    delete copy_queues[i];
  }
}