1 /*
   2  * Copyright (c) 2014, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "code/codeCache.hpp"
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/isGCActiveMark.hpp"
  28 #include "gc/shenandoah/brooksPointer.hpp"
  29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  30 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  31 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  34 #include "gc/shenandoah/shenandoahHeap.hpp"
  35 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  36 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  37 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/biasedLocking.hpp"
  40 #include "runtime/thread.hpp"
  41 #include "utilities/copy.hpp"
  42 #include "gc/shared/taskqueue.inline.hpp"
  43 #include "gc/shared/workgroup.hpp"
  44 
  45 class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet {
  46 public:
  47   ShenandoahMarkCompactBarrierSet(ShenandoahHeap* heap) : ShenandoahBarrierSet(heap) {
  48   }
  49   oop read_barrier(oop src) {
  50     return src;
  51   }
  52 #ifdef ASSERT
  53   bool is_safe(oop o) {
  54     if (o == NULL) return true;
  55     if (! oopDesc::unsafe_equals(o, read_barrier(o))) {
  56       return false;
  57     }
  58     return true;
  59   }
  60   bool is_safe(narrowOop o) {
  61     oop obj = oopDesc::decode_heap_oop(o);
  62     return is_safe(obj);
  63   }
  64 #endif
  65 };
  66 
  67 class ClearInCollectionSetHeapRegionClosure: public ShenandoahHeapRegionClosure {
  68 private:
  69   ShenandoahHeap* _heap;
  70 public:
  71 
  72   ClearInCollectionSetHeapRegionClosure() : _heap(ShenandoahHeap::heap()) {
  73   }
  74 
  75   bool doHeapRegion(ShenandoahHeapRegion* r) {
  76     _heap->set_next_top_at_mark_start(r->bottom(), r->top());
  77     r->clear_live_data();
  78     r->set_concurrent_iteration_safe_limit(r->top());
  79     return false;
  80   }
  81 };
  82 
// Timer shared by all full-GC pauses; allocated once in initialize().
STWGCTimer* ShenandoahMarkCompact::_gc_timer = NULL;

// One-time setup of the full-GC timer (C-heap allocated, GC memory tag).
void ShenandoahMarkCompact::initialize() {
  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}
  88 
// Entry point for a full, stop-the-world mark-compact collection.
// Must run in the VM thread at a safepoint. First recovers from any
// in-flight concurrent GC state (cancels concurrent mark/evacuation),
// then executes the four compaction phases with a pass-through barrier
// set installed for the duration of the pause.
void ShenandoahMarkCompact::do_mark_compact(GCCause::Cause gc_cause) {

  ShenandoahHeap* _heap = ShenandoahHeap::heap();
  ShenandoahCollectorPolicy* policy = _heap->shenandoahPolicy();

  _gc_timer->register_gc_start();

  _heap->set_full_gc_in_progress(true);

  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  IsGCActiveMark is_active;

  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc);

  // Optional heap dump before collection (HeapDumpBeforeFullGC machinery).
  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps);
  _heap->pre_full_gc_dump(_gc_timer);
  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps);

  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_prepare);

  // Full GC is supposed to recover from any GC state:

  // a. Cancel concurrent mark, if in progress
  if (_heap->concurrent_mark_in_progress()) {
    _heap->concurrentMark()->cancel();
    _heap->stop_concurrent_marking();
  }
  assert(!_heap->concurrent_mark_in_progress(), "sanity");

  // b. Cancel evacuation, if in progress
  if (_heap->is_evacuation_in_progress()) {
    _heap->set_evacuation_in_progress_at_safepoint(false);
  }
  assert(!_heap->is_evacuation_in_progress(), "sanity");

  // c. Reset the bitmaps for new marking
  _heap->reset_next_mark_bitmap(_heap->workers());
  assert(_heap->is_next_bitmap_clear(), "sanity");

  // Reset next-TAMS and live data on every region before re-marking.
  ClearInCollectionSetHeapRegionClosure cl;
  _heap->heap_region_iterate(&cl, false, false);

  /*
  if (ShenandoahVerify) {
    // Full GC should only be called between regular concurrent cycles, therefore
    // those verifications should be valid.
    _heap->verify_heap_after_evacuation();
    _heap->verify_heap_after_update_refs();
  }
  */

  // Install the pass-through barrier set for the pause; the previous
  // barrier set is restored before this method returns.
  BarrierSet* old_bs = oopDesc::bs();
  ShenandoahMarkCompactBarrierSet bs(_heap);
  oopDesc::set_bs(&bs);

  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_prepare);

  {
    GCTraceTime(Info, gc) time("Pause Full", _gc_timer, gc_cause, true);

    // Retire TLABs so the heap is parsable for object iteration.
    if (UseTLAB) {
      _heap->ensure_parsability(true);
    }

    CodeCache::gc_prologue();

    // We should save the marks of the currently locked biased monitors.
    // The marking doesn't preserve the marks of biased objects.
    //BiasedLocking::preserve_marks();

    _heap->set_need_update_refs(true);
    WorkGang* workers = _heap->workers();

    // Setup workers for phase 1
    {
      uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_init_marking(
        workers->active_workers(), (uint) Threads::number_of_non_daemon_threads());
      workers->update_active_workers(nworkers);
      ShenandoahWorkerScope scope(workers, nworkers);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_mark);
      phase1_mark_heap();
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_mark);
    }

    // Setup workers for the rest
    {
      uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(
        workers->active_workers(), (uint)Threads::number_of_non_daemon_threads());
      ShenandoahWorkerScope scope(workers, nworkers);

      OrderAccess::fence();

      // Per-worker queues of regions to compact: filled by phase 2,
      // consumed by phase 4, freed below.
      ShenandoahHeapRegionSet** copy_queues = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, _heap->max_workers(), mtGC);

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_calculate_addresses);
      phase2_calculate_target_addresses(copy_queues);
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_calculate_addresses);

      OrderAccess::fence();

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_adjust_pointers);
      phase3_update_references();
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_adjust_pointers);

      policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_copy_objects);
      phase4_compact_objects(copy_queues);
      policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_copy_objects);

      FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, copy_queues);

      CodeCache::gc_epilogue();
      JvmtiExport::gc_epilogue();
    }

    // refs processing: clean slate
    // rp.enqueue_discovered_references();

    if (ShenandoahVerify) {
      _heap->verify_heap_after_evacuation();
    }

    _heap->set_bytes_allocated_since_cm(0);

    _heap->set_need_update_refs(false);

    _heap->set_full_gc_in_progress(false);
  }

  _gc_timer->register_gc_end();

  policy->record_full_gc();

  // Optional heap dump after collection.
  policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps);
  _heap->post_full_gc_dump(_gc_timer);
  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps);

  policy->record_phase_end(ShenandoahCollectorPolicy::full_gc);

  // Restore the regular barrier set.
  oopDesc::set_bs(old_bs);

  if (UseShenandoahMatrix && PrintShenandoahMatrix) {
    outputStream* log = Log(gc)::info_stream();
    _heap->connection_matrix()->print_on(log);
  }
}
 239 
 240 #ifdef ASSERT
 241 class VerifyNotForwardedPointersClosure : public MetadataAwareOopClosure {
 242 private:
 243   template <class T>
 244   inline void do_oop_work(T* p) {
 245     T o = oopDesc::load_heap_oop(p);
 246     if (! oopDesc::is_null(o)) {
 247       oop obj = oopDesc::decode_heap_oop_not_null(o);
 248       assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
 249              "expect forwarded oop");
 250       ShenandoahHeap* heap = ShenandoahHeap::heap();
 251       if (! heap->is_marked_complete(obj)) {
 252         tty->print_cr("ref region humongous? %s", BOOL_TO_STR(heap->heap_region_containing(p)->is_humongous()));
 253       }
 254       assert(heap->is_marked_complete(obj), "must be marked");
 255       assert(! heap->allocated_after_complete_mark_start((HeapWord*) obj), "must be truly marked");
 256     }
 257   }
 258 public:
 259   void do_oop(oop* p) {
 260     do_oop_work(p);
 261   }
 262   void do_oop(narrowOop* p) {
 263     do_oop_work(p);
 264   }
 265 };
 266 
 267 class ShenandoahMCVerifyAfterMarkingObjectClosure : public ObjectClosure {
 268 public:
 269   void do_object(oop p) {
 270     ShenandoahHeap* heap = ShenandoahHeap::heap();
 271     assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
 272            "expect forwarded oop");
 273     assert(heap->is_marked_complete(p), "must be marked");
 274     assert(! heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
 275     VerifyNotForwardedPointersClosure cl;
 276     p->oop_iterate(&cl);
 277   }
 278 };
 279 
 280 class ShenandoahMCVerifyAfterMarkingRegionClosure : public ShenandoahHeapRegionClosure {
 281   bool doHeapRegion(ShenandoahHeapRegion* r) {
 282     ShenandoahMCVerifyAfterMarkingObjectClosure cl;
 283     if (! r->is_humongous_continuation()) {
 284       ShenandoahHeap::heap()->marked_object_iterate(r, &cl);
 285     }
 286     return false;
 287   }
 288 };
 289 
 290 #endif
 291 
// Phase 1: mark all live objects from roots, with reference processing and
// class unloading enabled. On completion the bitmaps are swapped so the
// just-finished marking becomes the "complete" liveness information used
// by phases 2-4.
void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahHeap* _heap = ShenandoahHeap::heap();

  ShenandoahConcurrentMark* cm = _heap->concurrentMark();

  // Full GC always processes references and unloads classes.
  cm->set_process_references(true);
  cm->set_unload_classes(true);

  ReferenceProcessor* rp = _heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
  rp->set_active_mt_degree(_heap->workers()->active_workers());

  COMPILER2_PRESENT(DerivedPointerTable::clear());
  cm->update_roots();
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  cm->mark_roots();
  cm->shared_finish_mark_from_roots(/* full_gc = */ true);

  // Promote the fresh "next" marking to the "complete" bitmap.
  _heap->swap_mark_bitmaps();

  if (UseShenandoahMatrix && PrintShenandoahMatrix) {
    outputStream* log = Log(gc)::info_stream();
    _heap->connection_matrix()->print_on(log);
  }

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    _heap->verify_heap_reachable_at_safepoint();
  }

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    //    Universe::heap()->prepare_for_verify();
    _heap->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    //    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
    _heap->verify(VerifyOption_G1UseMarkWord);
  }

#ifdef ASSERT
  // Exhaustive post-marking verification in debug builds.
  ShenandoahMCVerifyAfterMarkingRegionClosure cl;
  _heap->heap_region_iterate(&cl);
#endif
}
 348 
 349 class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
 350 private:
 351   ShenandoahHeap* _heap;
 352 public:
 353   ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {
 354   }
 355 
 356   bool doHeapRegion(ShenandoahHeapRegion* r) {
 357     if (r->is_humongous_start()) {
 358       oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
 359       if (! _heap->is_marked_complete(humongous_obj)) {
 360         _heap->reclaim_humongous_region_at(r);
 361       }
 362     }
 363     return false;
 364   }
 365 };
 366 
 367 
// Computes the target address of every live object in the from-regions a
// worker scans, and records it in the object's Brooks pointer (sliding
// compaction). Objects are packed into the current to-region; when full,
// the next to-region comes from the to-regions set, falling back to the
// current from-region itself if the set is exhausted.
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {

private:

  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _to_regions;  // candidate target regions
  ShenandoahHeapRegion* _to_region;      // current target region
  ShenandoahHeapRegion* _from_region;    // region currently being scanned
  HeapWord* _compact_point;              // next free word in _to_region

public:

  ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _to_regions(to_regions),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {
  }

  // Must be called before iterating the objects of each from-region.
  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  ShenandoahHeapRegion* to_region() const {
    return _to_region;
  }
  HeapWord* compact_point() const {
    return _compact_point;
  }
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked_complete(p), "must be marked");
    assert(! _heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    // Account for the Brooks pointer word that precedes every object.
    size_t obj_size = p->size() + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Object doesn't fit. Pick next to-region and start compacting there.
      _to_region->set_new_top(_compact_point);
      ShenandoahHeapRegion* new_to_region = _to_regions->current();
      _to_regions->next();
      if (new_to_region == NULL) {
        // No evacuated region available: compact into the from-region itself.
        new_to_region = _from_region;
      }
      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    // Only record the target address here; the move happens in phase 4.
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
};
 423 
 424 class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
 425 private:
 426 
 427   ShenandoahHeapRegionSet** _copy_queues;
 428   ShenandoahHeapRegionSet* _from_regions;
 429 
 430   ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* copy_queue) {
 431     ShenandoahHeapRegion* from_region = _from_regions->claim_next();
 432     while (from_region != NULL && (from_region->is_humongous() || from_region->is_pinned())) {
 433       from_region = _from_regions->claim_next();
 434     }
 435     if (from_region != NULL) {
 436       assert(copy_queue != NULL, "sanity");
 437       assert(! from_region->is_humongous(), "must not get humongous regions here");
 438       assert(! from_region->is_pinned(), "no pinned region in mark-compact");
 439       copy_queue->add_region(from_region);
 440     }
 441     return from_region;
 442   }
 443 
 444 public:
 445   ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet* from_regions, ShenandoahHeapRegionSet** copy_queues) :
 446     AbstractGangTask("Shenandoah Prepare For Compaction Task"),
 447     _from_regions(from_regions), _copy_queues(copy_queues) {
 448   }
 449 
 450   void work(uint worker_id) {
 451     ShenandoahHeap* heap = ShenandoahHeap::heap();
 452     ShenandoahHeapRegionSet* copy_queue = _copy_queues[worker_id];
 453     ShenandoahHeapRegion* from_region = next_from_region(copy_queue);
 454     if (from_region == NULL) return;
 455     ShenandoahHeapRegionSet* to_regions = new ShenandoahHeapRegionSet(ShenandoahHeap::heap()->max_regions());
 456     ShenandoahPrepareForCompactionObjectClosure cl(to_regions, from_region);
 457     while (from_region != NULL) {
 458       assert(from_region != NULL, "sanity");
 459       cl.set_from_region(from_region);
 460       heap->marked_object_iterate(from_region, &cl);
 461       if (from_region != cl.to_region()) {
 462         assert(from_region != NULL, "sanity");
 463         to_regions->add_region(from_region);
 464       }
 465       from_region = next_from_region(copy_queue);
 466     }
 467     assert(cl.to_region() != NULL, "should not happen");
 468     cl.to_region()->set_new_top(cl.compact_point());
 469     while (to_regions->count() > 0) {
 470       ShenandoahHeapRegion* r = to_regions->current();
 471       to_regions->next();
 472       if (r == NULL) {
 473         to_regions->print();
 474       }
 475       assert(r != NULL, "should not happen");
 476       r->set_new_top(r->bottom());
 477     }
 478     delete to_regions;
 479   }
 480 };
 481 
 482 void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** copy_queues) {
 483   GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
 484   ShenandoahHeap* heap = ShenandoahHeap::heap();
 485 
 486   ShenandoahMCReclaimHumongousRegionClosure cl;
 487   heap->heap_region_iterate(&cl);
 488 
 489   // Initialize copy queues.
 490   for (uint i = 0; i < heap->max_workers(); i++) {
 491     copy_queues[i] = new ShenandoahHeapRegionSet(heap->max_regions());
 492   }
 493 
 494   ShenandoahHeapRegionSet* from_regions = heap->regions();
 495   from_regions->clear_current_index();
 496   ShenandoahPrepareForCompactionTask prepare_task(from_regions, copy_queues);
 497   heap->workers()->run_task(&prepare_task);
 498 }
 499 
 500 class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
 501 private:
 502   ShenandoahHeap* _heap;
 503   size_t _new_obj_offset;
 504 public:
 505 
 506   ShenandoahAdjustPointersClosure() : _heap(ShenandoahHeap::heap()) {
 507   }
 508 
 509 private:
 510   template <class T>
 511   inline void do_oop_work(T* p) {
 512     T o = oopDesc::load_heap_oop(p);
 513     if (! oopDesc::is_null(o)) {
 514       oop obj = oopDesc::decode_heap_oop_not_null(o);
 515       assert(_heap->is_marked_complete(obj), "must be marked");
 516       oop forw = oop(BrooksPointer::get_raw(obj));
 517       oopDesc::encode_store_heap_oop(p, forw);
 518       if (UseShenandoahMatrix) {
 519         if (_heap->is_in_reserved(p)) {
 520           assert(_heap->is_in_reserved(forw), "must be in heap");
 521           // We're moving a to a', which points to b, about to be moved to b'.
 522           // We already know b' from the fwd pointer of b.
 523           // In the object closure, we see a, and we know a' (by looking at its
 524           // fwd ptr). We store the offset in the OopClosure, which is going
 525           // to visit all of a's fields, and then, when we see each field, we
 526           // subtract the offset from each field address to get the final ptr.
 527           _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
 528         }
 529       }
 530     }
 531   }
 532 public:
 533   void do_oop(oop* p) {
 534     do_oop_work(p);
 535   }
 536   void do_oop(narrowOop* p) {
 537     do_oop_work(p);
 538   }
 539   void set_new_obj_offset(size_t new_obj_offset) {
 540     _new_obj_offset = new_obj_offset;
 541   }
 542 };
 543 
 544 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
 545 private:
 546   ShenandoahAdjustPointersClosure _cl;
 547   ShenandoahHeap* _heap;
 548 public:
 549   ShenandoahAdjustPointersObjectClosure() :
 550     _heap(ShenandoahHeap::heap()) {
 551   }
 552   void do_object(oop p) {
 553     assert(_heap->is_marked_complete(p), "must be marked");
 554     HeapWord* forw = BrooksPointer::get_raw(p);
 555     _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
 556     p->oop_iterate(&_cl);
 557   }
 558 };
 559 
 560 class ShenandoahAdjustPointersTask : public AbstractGangTask {
 561 private:
 562   ShenandoahHeapRegionSet* _regions;
 563 public:
 564 
 565   ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) :
 566     AbstractGangTask("Shenandoah Adjust Pointers Task"),
 567     _regions(regions) {
 568   }
 569 
 570   void work(uint worker_id) {
 571     ShenandoahHeap* heap = ShenandoahHeap::heap();
 572     ShenandoahHeapRegion* r = _regions->claim_next();
 573     ShenandoahAdjustPointersObjectClosure obj_cl;
 574     while (r != NULL) {
 575       if (! r->is_humongous_continuation()) {
 576         heap->marked_object_iterate(r, &obj_cl);
 577       }
 578       r = _regions->claim_next();
 579     }
 580   }
 581 };
 582 
 583 class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
 584 private:
 585   ShenandoahRootProcessor* _rp;
 586 
 587 public:
 588 
 589   ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
 590     AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
 591     _rp(rp) {
 592   }
 593 
 594   void work(uint worker_id) {
 595     ShenandoahAdjustPointersClosure cl;
 596     CLDToOopClosure adjust_cld_closure(&cl, true);
 597     MarkingCodeBlobClosure adjust_code_closure(&cl,
 598                                              CodeBlobToOopClosure::FixRelocations);
 599 
 600     _rp->process_all_roots(&cl, &cl,
 601                            &adjust_cld_closure,
 602                            &adjust_code_closure, worker_id);
 603   }
 604 };
 605 
 606 void ShenandoahMarkCompact::phase3_update_references() {
 607   GCTraceTime(Info, gc, phases) time("Phase 2: Adjust pointers", _gc_timer);
 608   ShenandoahHeap* heap = ShenandoahHeap::heap();
 609 
 610   if (UseShenandoahMatrix) {
 611     heap->connection_matrix()->clear_all();
 612   }
 613 
 614     // Need cleared claim bits for the roots processing
 615   ClassLoaderDataGraph::clear_claimed_marks();
 616 
 617   WorkGang* workers = heap->workers();
 618   uint nworkers = workers->active_workers();
 619   {
 620     COMPILER2_PRESENT(DerivedPointerTable::clear());
 621 
 622     ShenandoahRootProcessor rp(heap, nworkers);
 623     ShenandoahAdjustRootPointersTask task(&rp);
 624     workers->run_task(&task);
 625     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 626   }
 627 
 628   ShenandoahHeapRegionSet* regions = heap->regions();
 629   regions->clear_current_index();
 630   ShenandoahAdjustPointersTask adjust_pointers_task(regions);
 631   workers->run_task(&adjust_pointers_task);
 632 }
 633 
 634 class ShenandoahCompactObjectsClosure : public ObjectClosure {
 635 private:
 636   ShenandoahHeap* _heap;
 637 public:
 638   ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {
 639   }
 640   void do_object(oop p) {
 641     assert(_heap->is_marked_complete(p), "must be marked");
 642     size_t size = (size_t)p->size();
 643     HeapWord* compact_to = BrooksPointer::get_raw(p);
 644     HeapWord* compact_from = (HeapWord*) p;
 645     if (compact_from != compact_to) {
 646       Copy::aligned_conjoint_words(compact_from, compact_to, size);
 647     }
 648     oop new_obj = oop(compact_to);
 649     // new_obj->init_mark();
 650     BrooksPointer::initialize(new_obj);
 651   }
 652 };
 653 
 654 class ShenandoahCompactObjectsTask : public AbstractGangTask {
 655   ShenandoahHeapRegionSet** _regions;
 656 public:
 657   ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** regions) :
 658     AbstractGangTask("Shenandoah Compact Objects Task"),
 659     _regions(regions) {
 660   }
 661   void work(uint worker_id) {
 662     ShenandoahHeap* heap = ShenandoahHeap::heap();
 663     ShenandoahHeapRegionSet* copy_queue = _regions[worker_id];
 664     copy_queue->clear_current_index();
 665     ShenandoahCompactObjectsClosure cl;
 666     ShenandoahHeapRegion* r = copy_queue->current();
 667     copy_queue->next();
 668     while (r != NULL) {
 669       assert(! r->is_humongous(), "must not get humongous regions here");
 670       heap->marked_object_iterate(r, &cl);
 671       r->set_top(r->new_top());
 672       r = copy_queue->current();
 673       copy_queue->next();
 674     }
 675   }
 676 };
 677 
 678 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
 679   size_t _live;
 680   ShenandoahHeap* _heap;
 681 public:
 682 
 683   ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
 684     _heap->clear_free_regions();
 685   }
 686 
 687   bool doHeapRegion(ShenandoahHeapRegion* r) {
 688     // Need to reset the complete-top-at-mark-start pointer here because
 689     // the complete marking bitmap is no longer valid. This ensures
 690     // size-based iteration in marked_object_iterate().
 691     _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());
 692     r->set_in_collection_set(false);
 693     if (r->is_humongous()) {
 694       _live += ShenandoahHeapRegion::region_size_bytes();
 695     } else {
 696       size_t live = r->used();
 697       if (live == 0) {
 698         r->recycle();
 699         _heap->add_free_region(r);
 700       }
 701       r->set_live_data(live);
 702       _live += live;
 703     }
 704     return false;
 705   }
 706 
 707   size_t get_live() { return _live; }
 708 
 709 };
 710 
// Phase 4: physically move objects to their target addresses, then reset
// region/bitmap state, recycle empty regions, and update heap usage.
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCompactObjectsTask compact_task(copy_queues);
  heap->workers()->run_task(&compact_task);

  heap->clear_cset_fast_test();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  heap->reset_complete_mark_bitmap(heap->workers());

  {
    // Heap lock guards the free-region list manipulated by the closure.
    ShenandoahHeap::ShenandoahHeapLock lock(heap);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);

    heap->set_used(post_compact.get_live());

  }

  heap->clear_cancelled_concgc();

  // Also clear the next bitmap in preparation for next marking.
  heap->reset_next_mark_bitmap(heap->workers());

  // Copy queues were allocated in phase 2; dispose of them here.
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete copy_queues[i];
  }

}